# Source: bachya/eufy-security-ws-python

"""Define utilities related to eufy-websocket-ws versions."""
from dataclasses import dataclass


@dataclass
class VersionInfo:
    """Define the server's version info."""

    driver_version: str
    server_version: str
    min_schema_version: int
    max_schema_version: int

    @classmethod
    def from_message(cls, msg: dict) -> "VersionInfo":
        """Create an instance from a version message."""
        return cls(
            driver_version=msg["driverVersion"],
            server_version=msg["serverVersion"],
            min_schema_version=msg.get("minSchemaVersion", 0),
            max_schema_version=msg.get("maxSchemaVersion", 0),
        )
# Analysis of Algorithms 3CV2
# <NAME>
# <NAME>
# Practical 1: Binary Addition
# This file defines the plotting functions for the algorithm.
import matplotlib.pyplot as plt
import numpy as np


def graph(size, time):
    # Window title.
    plt.figure("Complejidad temporal del algoritmo de suma binaria")
    # Chart title.
    plt.title("Suma binaria:", color=(0.3, 0.4, 0.6), weight="bold")
    # Build the plot parameters.
    t, n = parametros(size, time)
    # Set the plot limits.
    plt.xlim(0, size)
    plt.ylim(0, time)
    # Proposed function: g(n) = (5/3)n.
    # NB: _t is never defined in the fragmented source; it is assumed here
    # to hold the values of the proposed bound g(n) = (5/3)n.
    _t = [(5 / 3) * x for x in n]
    # Axis labels.
    plt.xlabel("Tamaño ( n )", color=(0.3, 0.4, 0.6), size="large")
    plt.ylabel("Tiempo ( t )", color=(0.3, 0.4, 0.6), size="large")
    # Plot.
    plt.plot(n, _t, "bs", label="g( n ) = ( 5/3 )n")
    plt.plot(n, t, "g^", linewidth=3, label="E( n ) = n")
    plt.plot(n, _t, "r--", label="g( n ) = ( 5/3 )n")
    plt.plot(n, t, "b--", linewidth=3, label="E( n ) = n")
    plt.legend(loc="lower right")
    plt.show()


def parametros(size, time):
    # Time versus plot points.
    t, n = [], [0]
    # div: helper variable used to trace the plot.
    div = float("{0:.2f}".format(1 / round(time / size)))
    # Time (t) parameters.
    for i in range(time):
        t.append(i)
    # Size (n) parameters.
    for i in range(time):
        if i != 0:
            n.append(float("{0:.2f}".format(n[i - 1] + div)))
    # Return the values.
    return t, n
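A hypothetical call of the helpers above; the concrete numbers are invented for illustration. Note that time should be an integer multiple of size so that round(time / size) is non-zero:

# plots E(n) = n against the proposed bound g(n) = (5/3)n
# for 50 input sizes over 100 time units
graph(50, 100)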
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Chemical RDF converter & fixer.
Version 2.3 (Dec 28, 14:25:00 2021)
Added mol sanitization and try/catch
run by calling rdf_fixer.convert(filename or path)
(optional: returns list of new filenames)
@author: <NAME> (DocMinus)
license: MIT License
Copyright (c) 2021 DocMinus
"""
import os
import re
import pandas as pd
from collections import OrderedDict
import rdkit.Chem as rdc
from rdkit.Chem.MolStandardize import rdMolStandardize
from rdkit import RDLogger

# Important, or else waaaay too many RDkit details in output
RDLogger.logger().setLevel(RDLogger.CRITICAL)


def fix(RDF_IN: str) -> "zipped":
    """Retrieving all .RDF files in a subdirectory recursively. Then submit for conversion.
    Parts of os.walk snippet originated on Reddit somewhere, forgot where though.

    Args:
        RDF_IN = filename, alt. directory and subdirectories to scan
    Returns:
        zipped List of the new file names
        Order: input_file; fixed_file; csv_file
    """
    file_list_in = []
    file_list_ok = []
    file_list_csv = []
    if os.path.isfile(RDF_IN):
        if RDF_IN.endswith(("rdf", "RDF")):
            file_list_in.append(os.path.join(RDF_IN))
            file_list_ok.append(os.path.splitext(RDF_IN)[0] + "_fixed.rdf")
            file_list_csv.append(os.path.splitext(RDF_IN)[0] + ".csv")
    elif os.path.isdir(RDF_IN):
        for subdir, dirs, files in os.walk(RDF_IN):
            for file in files:
                if file.endswith(("rdf", "RDF")):
                    file_list_in.append(os.path.join(subdir, file))
                    file_list_ok.append(
                        os.path.join(subdir, os.path.splitext(file)[0] + "_fixed.rdf")
                    )
                    file_list_csv.append(
                        os.path.join(subdir, os.path.splitext(file)[0] + ".csv")
                    )

    zipped = zip(file_list_in, file_list_ok, file_list_csv)
    # note: zip gets unpacked upon usage and disappears
    for file_in, file_ok, file_csv in zipped:
        print("Converting file: ", file_in)
        convert(file_in, file_ok, file_csv)

    return zip(file_list_in, file_list_ok, file_list_csv)


def convert(RDF_IN_FILE: str, RDF_OK_FILE: str, RDF_CSV_FILE: str):
    """original script with single file usage wrapped into this 'convert' function

    Args:
        RDF_IN_FILE: original input RDF file including path
        RDF_OK_FILE: new RDF file with corrections (if any)
        RDF_CSV_FILE: resulting CSV file (incl. path)
    Returns:
        None - output are the new files.
    """
    ##############################################################
    # Fix erroneous entries (empty mols) by deleting those entries
    with open(RDF_IN_FILE) as file_in:
        seed_line = file_in.readline()
        previous_line = seed_line
    # get first line as "seed" for upcoming loop
    # seed_line is later reused again
    with open(RDF_OK_FILE, "w") as file_out:
        write_to_file = True
        for current_line in open(RDF_IN_FILE):
            # prevent first line from being written twice
            if current_line.startswith("$RDFILE") and previous_line.startswith("$RDFILE"):
                continue
            # correct molecule block # True
            write_to_file = current_line.startswith("$RXN") and previous_line.startswith("$RFMT")
            # else for empty molecule block
            write_to_file = not (
                current_line.startswith("$DTYPE") and previous_line.startswith("$RFMT")
            )
            if write_to_file:
                file_out.write(previous_line)
            previous_line = current_line
        file_out.write(previous_line)
        # the last line is not caught in the loop, hence written out here.
    ####################
    # end of fix section
    ####################

    def scifi_or_reax(in_file: str) -> str:
        """Determine if Scifinder or Reaxys rdf file
        (Scifinder contains 'SCHEME' in the enumeration)
        Returned string is multiple string.replace() methods,
        to render script independent of source

        Args:
            in_file (str): filename of the corrected file
                (in principle, the original one would work as well;
                alt even global variable possible instead)
        Returns:
            SCI_REAX (str): "RXN:" (scifinder) or string "ROOT:" (reaxys)
        """
        f = open(in_file)
        NUMBER_OF_LINES = 3
        for i in range(NUMBER_OF_LINES):
            line_three = f.readline()
        return "RXN:" if re.match(".+SCHEME", line_three) else "ROOT:"

    def build_empty_table(in_file: str, SCI_REAX: str):
        """Scans file three times to build a pandas df used as main table

        Args:
            in_file (str): filename of the corrected file: RDF_OK_FILE
            SCI_REAX (str): "RXN:" (scifinder) or string "ROOT:" (reaxys) used in replacements
        Returns:
            da_table (object): the (empty) pandas df working table
            max_reagents (int): number for later positioning of reagents smiles in table
            max_products (int): <> (products)
        """
        # get the IDs and use as row index
        list_of_IDs = []  # i.e. rows
        for line in open(in_file):
            if line.startswith("$RFMT"):
                list_of_IDs.append(line.strip().split(" ")[2])

        # determine max no of reagents/products
        flag = 0
        max_reagents = 0
        max_products = 0
        for line in open(in_file):
            if line.startswith("$RXN") | flag == 1:
                flag = 1
                if re.match("\s\s[0-9]\s\s[0-9]\n", line):
                    # analyse the "  y  z" line. Not hard-coding this since it might change?
                    # implies: y reactants, z products.
                    x = line.strip().split("  ")
                    number_reagents = int(x[0])
                    number_products = int(x[1])
                    if number_reagents > max_reagents:
                        max_reagents = number_reagents
                    if number_products > max_products:
                        max_products = number_products
                    flag = 0

        # build the column headers
        fields = []
        for i in range(max_reagents):
            tmp_name = "Reagent" + str(i)
            fields.append(tmp_name)
        for i in range(max_products):
            tmp_name = "Product" + str(i)
            fields.append(tmp_name)
        # (reconstruction: the fragmented source only shows a third loop here;
        # the remaining columns are assumed to be the unique $DTYPE field names,
        # deduplicated in order - hence the OrderedDict import)
        for line in open(in_file):
            if line.startswith("$DTYPE"):
                fields.append(line.strip().split(" ")[1].replace(SCI_REAX, ""))
        fields = list(OrderedDict.fromkeys(fields))
        da_table = pd.DataFrame(index=list_of_IDs, columns=fields)
        return da_table, max_reagents, max_products

    ####################################
    # Table and diverse variables
    # get string replacement variable depending on source
    SCI_REAX = scifi_or_reax(RDF_OK_FILE)
    # build table according to files specs. get max no of reagents & products at the same time.
    my_table, max_reagents, max_products = build_empty_table(RDF_OK_FILE, SCI_REAX)

    ####################################################################
    # Here comes the actual data extraction and addition to pandas table
    # ############### GET MOLECULES #############
    # (structure same for Reaxys and Scifinder)
    # flag = 0
    # 0 = generic
    # 1 = start of reaction block
    # 2 = single MOL (molecules)
    # 9 = skip
    flag = 0
    molecule = []
    number_reagents = 0
    number_products = 0
    number_molecules = 0
    iterate_molecules = 0
    mol_string = ""
    rxn_id = ""
    multiple_row_text = ""
    # get first line as "seed" for upcoming loop
    previous_line = seed_line
    for line in open(RDF_OK_FILE):
        current_line = line
        # get reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            flag = 0
            continue
        # start of a new reaction block
        if current_line.startswith("$RXN") | flag == 1:
            flag = 1
            if re.match("\s\s[0-9]\s\s[0-9]\n", current_line):
                # analyse the "  y  z" line. Not hard-coding this since it might change?
                # implies: y reactants, z product.
                x = current_line.strip().split("  ")
                number_reagents = int(x[0])
                number_products = int(x[1])
                number_molecules = number_reagents + number_products
                # create fresh list of max no of molecules, for use in $MOL block
                # yes, always same size within a *given file*, can change from file to file(!)
                for i in range(number_molecules):
                    molecule.append([])
        if current_line == "\n" or re.match("\s\s[0-9]\s\s[0-9]\n", current_line):
            # checks for empty lines and the number of molecules
            previous_line = current_line
            continue
        # after determining a block, find the molecules within the block
        if (current_line == "$MOL\n") | (flag == 2):
            flag = 2
            if current_line != "$MOL\n" and (iterate_molecules < number_molecules):
                molecule[iterate_molecules].append(current_line)
                if current_line == "M  END\n":
                    iterate_molecules += 1
        # end of the complete reaction block
        if current_line.startswith("$D") & (previous_line == "M  END\n"):
            flag = 9  # could just use flag = 0(?)
            # rebuild the string of a molecule
            counter_reagents = 0
            counter_products = 0
            num_mols_this_instance = len(molecule)
            # should always be max_mol now, so doesn't matter
            for mol in range(num_mols_this_instance):
                mol_string = "".join(molecule[mol])
                if mol_string == "":
                    smiles = ""
                else:
                    mol = rdc.MolFromMolBlock(mol_string, sanitize=False)
                    if mol is None:
                        continue
                    try:
                        rdc.SanitizeMol(mol)
                    except ValueError as _e:
                        print("Error: ", _e)
                        continue
                    mol.UpdatePropertyCache(strict=False)
                    rdc.SanitizeMol(
                        mol,
                        sanitizeOps=(
                            rdc.SANITIZE_ALL
                            ^ rdc.SANITIZE_CLEANUP
                            # (trailing flags not fully recoverable from the
                            # fragments; SANITIZE_PROPERTIES assumed here)
                            ^ rdc.SANITIZE_PROPERTIES
                        ),
                    )
                    mol = rdMolStandardize.Normalize(mol)
                    smiles = rdc.MolToSmiles(mol)
                # some mols might be empty, this if/else positions reagents/products accordingly
                if counter_reagents + 1 <= number_reagents:
                    my_table.loc[rxn_id, my_table.columns[counter_reagents]] = smiles
                    counter_reagents += 1
                else:
                    # (product column offset reconstructed from the Reagent/Product column order)
                    my_table.loc[rxn_id, my_table.columns[max_reagents + counter_products]] = smiles
                    counter_products += 1
            # reset variables
            iterate_molecules = 0
            molecule = []
            mol_string = ""
        previous_line = current_line

    ################################
    # ######### GET single line data ##########
    #
    # Nota bene: this will write first line of multiline columns as well
    # but doesn't matter since those will be correctly overwritten later on
    rxn_id = ""
    previous_line = seed_line
    for line in open(RDF_OK_FILE):
        current_line = line
        # get reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            flag = 0
            continue
        if previous_line.startswith("$DTYPE") and current_line.startswith("$DATUM"):
            current_column = previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
            row_text = current_line.replace("\n", " ")
            # flag = 1
            my_table.loc[rxn_id, current_column] = row_text.replace("$DATUM ", "")
        previous_line = current_line

    ################################
    # ### Extract Experimental Procedure ###
    # Multiline, both,
    # Reaxys and Scifinder
    # flag = 0
    # 0 = generic
    # 5 = exp procedure text over multiple lines
    # 9 = skip
    rxn_id = ""
    multiple_row_text = ""
    previous_line = seed_line
    for line in open(RDF_OK_FILE):
        current_line = line
        # get reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            flag = 0
            continue
        # Get experimental section
        if SCI_REAX == "RXN:":
            if re.match(".+EXP_PROC", previous_line) or flag == 5:
                # start of the experimental section. spans over multiple line
                if re.match(".+EXP_PROC", previous_line):
                    current_column = previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
                if re.match(".+NOTES", current_line) or re.match(".+REFERENCE.+", current_line):
                    # this is the end of experimental block
                    flag = 9
                    my_table.loc[rxn_id, current_column] = multiple_row_text.replace("$DATUM ", "")
                    multiple_row_text = ""
                else:
                    multiple_row_text += current_line.replace("\n", " ")
                    flag = 5
        else:
            # Reaxys
            if re.match(".+TXT", previous_line) or flag == 5:
                # start of the experimental section. spans over multiple line
                if re.match(".+TXT", previous_line):
                    current_column = previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
                if re.match(".+STP", current_line):
                    # this is the end of experimental block
                    flag = 9
                    my_table.loc[rxn_id, current_column] = multiple_row_text.replace("$DATUM ", "")
                    multiple_row_text = ""
                else:
                    multiple_row_text += current_line.replace("\n", " ")
                    flag = 5
        previous_line = current_line

    ################################
    # ######## Extract Notes ########
    # (only Scifinder)
    # flag = 0
    # 0 = generic
    # 6 = notes, text potentially over multiple lines
    # 9 = skip
    rxn_id = ""
    multiple_row_text = ""
    previous_line = seed_line
    for line in open(RDF_OK_FILE):
        current_line = line
        # get reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            flag = 0
            continue
        # Get Notes
        if re.match(".+NOTES", previous_line) or flag == 6:
            flag = 6
            # start of the Notes section. might span over multiple line
            if re.match(".+NOTES", previous_line):
                current_column = previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
            if current_line.startswith("$DTYPE"):
                # this is the end of Notes block
                flag = 9
                my_table.loc[rxn_id, current_column] = multiple_row_text.replace("$DATUM ", "")
                multiple_row_text = ""
            else:
                multiple_row_text += current_line.replace("\n", " ")
                flag = 6
        previous_line = current_line

    ################################
    # ######## Extract title ########
    # (only Scifinder)
    # flag = 0
    # 0 = generic
    # 7 = title
    # 9 = skip
    rxn_id = ""
    multiple_row_text = ""
    previous_line = seed_line
    for line in open(RDF_OK_FILE):
        current_line = line
        # get reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            flag = 0
            continue
        # Get Title
        if re.match(".+TITLE", previous_line) or flag == 7:
            flag = 7
            # start of the Title section. might span over multiple line
            if re.match(".+TITLE", previous_line):
                current_column = previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
            if current_line.startswith("$DTYPE"):
                # this is the end of title block
                flag = 9
                my_table.loc[rxn_id, current_column] = multiple_row_text.replace("$DATUM ", "")
                multiple_row_text = ""
            else:
                multiple_row_text += current_line.replace("\n", " ")
                flag = 7
        previous_line = current_line

    ################################
    # ######## Extract Authors ########
    # (only Scifinder)
    # flag = 0
    # 0 = generic
    # 8 = authors
    # 9 = skip
    rxn_id = ""
    multiple_row_text = ""
    previous_line = seed_line
    for line in open(RDF_OK_FILE):
        current_line = line
        # get reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            flag = 0
            continue
        # Get Authors
        if re.match(".+AUTHOR", previous_line) or flag == 8:
            flag = 8
            if re.match(".+AUTHOR", previous_line):
                current_column = previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
            if current_line.startswith("$DTYPE"):
                # this is the end of author block
                flag = 9
                my_table.loc[rxn_id, current_column] = multiple_row_text.replace("$DATUM ", "")
                multiple_row_text = ""
            else:
                multiple_row_text += current_line.replace("\n", " ")
                flag = 8
        previous_line = current_line

    ################################
    # ### Extract citation (multiline, both source) ###
    #
    # This is done last, since for Scifinder
    # this is the last entry in a file
    # not necessary for reaxys, but it will go through it anyway
    # (less ifs and doesn't screw anything up)
    # flag = 0
    # 0 = generic
    # 9 = skip
    # 4 = citation
    rxn_id = ""
    multiple_row_text = ""
    previous_line = seed_line
    for line in open(RDF_OK_FILE):
        current_line = line
        # get reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            flag = 0
            continue
        # Get Citation
        if re.match(".+CITATION", previous_line) or flag == 4:
            flag = 4
            if re.match(".+CITATION", previous_line):
                current_column = previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
            if current_line.startswith("$DTYPE"):
                # this is the end of citation block
                flag = 9
                my_table.loc[rxn_id, current_column] = multiple_row_text.replace("$DATUM ", "")
                multiple_row_text = ""
            else:
                multiple_row_text += current_line.replace("\n", " ")
                flag = 4
        previous_line = current_line

    ################################
    # End of file scanning
    # ############################################
    # Finish table for export to csv file format
    my_table = my_table.replace(pd.np.nan, "", regex=True)  # need to remove NaN
    my_table.drop(
        list(my_table.filter(regex="COPYRIGHT")), axis=1, inplace=True
    )  # skip the copyright (optional)
    my_table.to_csv(RDF_CSV_FILE, sep="\t", header=True, index=True)
    # end of script
    # one could add a return value for better ...
def fix(RDF_IN: str) -> zip:
    """Retrieve all .RDF files in a subdirectory recursively,
    then submit them to conversion (i.e. fixing).
    Parts of the os.walk snippet originated on Reddit somewhere, forgot where though.

    Args:
        RDF_IN: filename, alternatively a directory (incl. subdirectories) to scan
    Returns:
        zipped list of the new file names.
        Order: input_file; fixed_file; csv_file
    """
    file_list_in = []
    file_list_ok = []
    file_list_csv = []
    if os.path.isfile(RDF_IN):
        if RDF_IN.endswith(("rdf", "RDF")):
            file_list_in.append(os.path.join(RDF_IN))
            file_list_ok.append(os.path.splitext(RDF_IN)[0] + "_fixed.rdf")
            file_list_csv.append(os.path.splitext(RDF_IN)[0] + ".csv")
    elif os.path.isdir(RDF_IN):
        for subdir, dirs, files in os.walk(RDF_IN):
            for file in files:
                if file.endswith(("rdf", "RDF")):
                    file_list_in.append(os.path.join(subdir, file))
                    file_list_ok.append(
                        os.path.join(subdir, os.path.splitext(file)[0] + "_fixed.rdf")
                    )
                    file_list_csv.append(
                        os.path.join(subdir, os.path.splitext(file)[0] + ".csv")
                    )

    zipped = zip(file_list_in, file_list_ok, file_list_csv)
    # note: a zip object gets unpacked (exhausted) upon usage and disappears,
    # hence a fresh zip is returned below instead of reusing this one
    for file_in, file_ok, file_csv in zipped:
        print("Converting file: ", file_in)
        convert(file_in, file_ok, file_csv)

    return zip(file_list_in, file_list_ok, file_list_csv)
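# Usage sketch (the path below is hypothetical, purely for illustration):
# fix() accepts either a single .rdf file or a directory tree and returns
# a zip of (input_file, fixed_file, csv_file) triples, consumable once.
#
#   for rdf_in, rdf_fixed, csv_out in fix("./my_rdf_exports"):
#       print(f"{rdf_in} -> {rdf_fixed}, table: {csv_out}")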
def scifi_or_reax(in_file: str) -> str:
    """Determine whether an rdf file comes from Scifinder or Reaxys
    (Scifinder contains 'SCHEME' in the enumeration).
    The returned string feeds the multiple string.replace() calls,
    rendering the script independent of the source.

    Args:
        in_file (str): filename of the corrected file (in principle, the original
                       one would work as well; alternatively even a global
                       variable would be possible instead)
    Returns:
        SCI_REAX (str): "RXN:" (Scifinder) or "ROOT:" (Reaxys)
    """
    NUMBER_OF_LINES = 3
    # use a context manager so the file handle is closed again
    with open(in_file) as f:
        for _ in range(NUMBER_OF_LINES):
            line_three = f.readline()
    return "RXN:" if re.match(".+SCHEME", line_three) else "ROOT:"
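# The heuristic only inspects line three of the file: Scifinder exports carry
# a 'SCHEME' tag in their enumeration there, Reaxys exports do not. A sketch
# with made-up lines (not verbatim vendor output):
#
#   line 3 contains "...SCHEME..."  -> returns "RXN:"  (Scifinder)
#   line 3 without "SCHEME"         -> returns "ROOT:" (Reaxys)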
def build_empty_table(in_file: str, SCI_REAX: str):
    """Scans the file three times to build a pandas df used as the main table.

    Args:
        in_file (str): filename of the corrected file: RDF_OK_FILE
        SCI_REAX (str): "RXN:" (Scifinder) or "ROOT:" (Reaxys), used in replacements
    Returns:
        da_table (object): the (empty) pandas df working table
        max_reagents (int): number for later positioning of reagent smiles in the table
        max_products (int): <> (products)
    """
    # get the IDs and use them as row index
    list_of_IDs = []  # i.e. rows
    for line in open(in_file):
        if line.startswith("$RFMT"):
            list_of_IDs.append(line.strip().split(" ")[2])

    # determine the max no of reagents/products
    flag = 0
    max_reagents = 0
    max_products = 0
    for line in open(in_file):
        if line.startswith("$RXN") or flag == 1:
            flag = 1
            if re.match(r"\s\s[0-9]\s\s[0-9]\n", line):
                # analyse the "  y  z" line.
                # implies: y reactants, z products.
                x = line.strip().split()
                number_reagents = int(x[0])
                number_products = int(x[1])
                if number_reagents > max_reagents:
                    max_reagents = number_reagents
                if number_products > max_products:
                    max_products = number_products
                flag = 0

    # build the column headers
    fields = []
    for i in range(max_reagents):
        fields.append("Reagent" + str(i))
    for i in range(max_products):
        fields.append("Product" + str(i))
    for line in open(in_file):
        if line.startswith("$DTYPE"):
            fields.append((line.strip().split(" ")[1]).replace(SCI_REAX, ""))

    # finally, build the table
    da_table = pd.DataFrame(
        index=list_of_IDs, columns=list(OrderedDict.fromkeys(fields))
    )

    return da_table, max_reagents, max_products
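# Resulting table layout, sketched for a file whose largest reaction has two
# reagents and one product (column names after the Reagent/Product slots come
# straight from the file's $DTYPE fields, with the SCI_REAX prefix stripped):
#
#   index (rxn_id) | Reagent0 | Reagent1 | Product0 | <field> | <field> | ...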
def convert(RDF_IN_FILE: str, RDF_OK_FILE: str, RDF_CSV_FILE: str):
    """The original single-file script, wrapped into this 'convert' function.

    Args:
        RDF_IN_FILE: original input RDF file including path
        RDF_OK_FILE: new RDF file with corrections (if any)
        RDF_CSV_FILE: resulting CSV file (incl. path)
    Returns:
        None - output are the new files.
    """
    ##############################################################
    # Fix erroneous entries (empty mols) by deleting those entries
    with open(RDF_IN_FILE) as file_in:
        seed_line = file_in.readline()
        previous_line = seed_line
        # the first line serves as "seed" for the upcoming loop;
        # seed_line is reused again further down
        with open(RDF_OK_FILE, "w") as file_out:
            for current_line in file_in:
                # prevent the first line from being written twice
                if current_line.startswith("$RDFILE") and previous_line.startswith(
                    "$RDFILE"
                ):
                    continue
                # a correct molecule block starts with $RXN right after $RFMT;
                # an empty molecule block jumps straight from $RFMT to $DTYPE,
                # so the preceding $RFMT line gets dropped here
                write_to_file = not (
                    current_line.startswith("$DTYPE")
                    and previous_line.startswith("$RFMT")
                )
                if write_to_file:
                    file_out.write(previous_line)
                previous_line = current_line
            # the last line is not caught in the loop, hence written out here
            file_out.write(previous_line)
    # end of fix section
    ####################
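    # What the fix section drops, sketched on a made-up record (tags follow the
    # RDfile convention, the ID 42 is purely illustrative):
    #
    #   $RFMT $RIREG 42     <- kept only when a $RXN block follows
    #   $DTYPE ...          <- this pairing marks an empty entry, so the
    #                          preceding $RFMT line is not written out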
x =", "Args: RDF_IN = filename, alt. directory and subdirectories to scan Returns: zipped List", "file_ok, file_csv) return zip(file_list_in, file_list_ok, file_list_csv) def convert(RDF_IN_FILE: str, RDF_OK_FILE: str, RDF_CSV_FILE: str):", "max no of reagents & products at the same time. my_table, max_reagents, max_products", "# this is the end of Notes block flag = 9 my_table.loc[rxn_id, current_column]", "num_mols_this_instance = len(molecule) # should always be max_mol now, so doesn't matter for", "os.path.splitext(file)[0] + \"_fixed.rdf\") ) file_list_csv.append( os.path.join(subdir, os.path.splitext(file)[0] + \".csv\") ) zipped = zip(file_list_in,", "no of molecules, for use in $MOL block # yes, always same size", "is the last entry in a file # not necessary for reaxys, but", "the original one would work as well; alt even global variable possible instead)", "line is not caught in the loop, hence written out here. # end", "previous_line.startswith( \"$RDFILE\" ): continue # correct molecule block # True write_to_file = current_line.startswith(", "0 max_reagents = 0 max_products = 0 for line in open(in_file): if line.startswith(\"$RXN\")", "# 1 = start of reaction block # 2 = single MOL (molecules)", ") # skip the copyright (optional) my_table.to_csv(RDF_CSV_FILE, sep=\"\\t\", header=True, index=True) # end of", "build_empty_table(RDF_OK_FILE, SCI_REAX) #################################################################### # Here comes the actual data extraction and addition to", "flag == 8: flag = 8 if re.match(\".+AUTHOR\", previous_line): current_column = ( previous_line.strip().split(\"", "# reset variables iterate_molecules = 0 molecule = [] mol_string = \"\" previous_line", "table Args: in_file (str): filename of the corrected file: RDF_OK_FILE SCI_REAX (str): \"RXN:\"", "= seed_line for line in open(RDF_OK_FILE): current_line = line # get reaction ID", "a subdirectory recursively. Then submit to conversion (i.e. 
fixing) Parts of os.walk snippet", "is the end of citation block flag = 9 my_table.loc[rxn_id, current_column] = multiple_row_text.replace(", "# checks for empty lines and the number of molecules lines and skips", "\"\" multiple_row_text = \"\" # get first line as \"seed\" for upcoming loop", "Authors if re.match(\".+AUTHOR\", previous_line) or flag == 8: flag = 8 if re.match(\".+AUTHOR\",", "fields.append(tmp_name) for i in range(max_products): tmp_name = \"Product\" + str(i) fields.append(tmp_name) for line", "current_column] = multiple_row_text.replace( \"$DATUM \", \"\" ) multiple_row_text = \"\" else: multiple_row_text +=", "create fresh list of max no of molecules, for use in $MOL block", "\"seed\" for upcoming loop previous_line = seed_line for line in open(RDF_OK_FILE): current_line =", "\"RXN:\": if re.match(\".+EXP_PROC\", previous_line) or flag == 5: # start of the experimental", "pandas df used as main table Args: in_file (str): filename of the corrected", "# (only Scifinder) # flag = 0 # 0 = generic # 6", "and subdirectories to scan Returns: zipped List of the new file names Order:", "############################################################## # Fix erroneous entries (empty mols) by deleting those entries with open(RDF_IN_FILE)", "number_reagents: my_table.loc[ rxn_id, my_table.columns[counter_reagents] ] = smiles counter_reagents += 1 else: my_table.loc[ rxn_id,", "5 = exp procedure text over multiple lines # 9 = skip rxn_id", "overwritten later on rxn_id = \"\" previous_line = seed_line for line in open(RDF_OK_FILE):", "well # but doesn't matter since those will be correctly overwritten later on", "and diverse variables # get string replacement variable depending on source SCI_REAX =", "rxn_id = str(current_line.strip().split(\" \")[2]) flag = 0 continue # Get Notes if re.match(\".+NOTES\",", "rxn_id = str(current_line.strip().split(\" \")[2]) flag = 0 continue # Get Citation if re.match(\".+CITATION\",", "with single file usage wrapped into this 'convert' function Args: RDF_IN_FILE: original input", "6 # start of the Notes section. might span over multiple line if", "max_products = build_empty_table(RDF_OK_FILE, SCI_REAX) #################################################################### # Here comes the actual data extraction and", "Returns: da_table (object): the (empty) pandas df working table max_reagents (int): number for", "of reaction block # 2 = single MOL (molecules) # 9 = skip", "# i.e. rows for line in open(in_file): if line.startswith(\"$RFMT\"): list_of_IDs.append(line.strip().split(\" \")[2]) # determine", "original one would work as well; alt even global variable possible instead) Returns:", "previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if re.match(\".+STP\", current_line): # this", "reused again with open(RDF_OK_FILE, \"w\") as file_out: write_to_file = True for current_line in", "number_products = int(x[1]) number_molecules = number_reagents + number_products # create fresh list of", "\"\"\"Retrieving all .RDF files in a subdirectory recursively. Then submit to conversion (i.e.", "of the Title section. might span over multiple line if re.match(\".+TITLE\", previous_line): current_column", "should always be max_mol now, so doesn't matter for mol in range(num_mols_this_instance): mol_string", "according to files specs. get max no of reagents & products at the", "on source SCI_REAX = scifi_or_reax(RDF_OK_FILE) # build table according to files specs. 
get", "\") flag = 7 previous_line = current_line ################################ # ####### Extract authors ########", "seed_line # get first line as \"seed\" for upcoming loop # seed_line is", "caught in the loop, hence written out here. # end of fix section", "#################### def scifi_or_reax(in_file: str) -> str: \"\"\"Determine if Scifinder or Reaxys rdf file", "previous_line = current_line ################################ # ### Extract citation (i.e. source) ### # #", "= ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if re.match(\".+STP\", current_line): # this is the", "in output RDLogger.logger().setLevel(RDLogger.CRITICAL) def fix(RDF_IN: str) -> \"zipped\": \"\"\"Retrieving all .RDF files in", "+ \"_fixed.rdf\") file_list_csv.append(os.path.splitext(RDF_IN)[0] + \".csv\") elif os.path.isdir(RDF_IN): for subdir, dirs, files in os.walk(RDF_IN):", "str, RDF_CSV_FILE: str): \"\"\"original script with single file usage wrapped into this 'convert'", "skip rxn_id = \"\" multiple_row_text = \"\" previous_line = seed_line for line in", "# flag = 0 # 0 = generic # 9 = skip #", "hence written out here. # end of fix section #################### def scifi_or_reax(in_file: str)", "# get first line as \"seed\" for upcoming loop previous_line = seed_line for", "or flag == 5: # start of the experimental section. spans over multiple", "through it anyway # (less ifs and doesn't screw anything up) # flag", "\" \") flag = 4 previous_line = current_line ################################ # End of file", "open(in_file): if line.startswith(\"$DTYPE\"): fields.append((line.strip().split(\" \")[1]).replace(SCI_REAX, \"\")) # finally, build the table da_table =", "^ rdc.SANITIZE_PROPERTIES ), ) mol = rdMolStandardize.Normalize(mol) smiles = rdc.MolToSmiles(mol) # some mols", "> max_reagents: max_reagents = number_reagents if number_products > max_products: max_products = number_products flag", "\")[1].replace(SCI_REAX, \"\") row_text = current_line.replace(\"\\n\", \" \") # flag = 1 my_table.loc[rxn_id, current_column]", "output are the new files. \"\"\" ############################################################## # Fix erroneous entries (empty mols)", "replacement variable depending on source SCI_REAX = scifi_or_reax(RDF_OK_FILE) # build table according to", "\"\") ) if re.match(\".+NOTES\", current_line) or re.match( \".+REFERENCE.+\", current_line ): # this is", "flag = 9 # could just use flag = 0(?) # rebuild the", "products at the same time. my_table, max_reagents, max_products = build_empty_table(RDF_OK_FILE, SCI_REAX) #################################################################### #", "over multiple line if re.match(\".+TXT\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") )", "OrderedDict import rdkit.Chem as rdc from rdkit.Chem.MolStandardize import rdMolStandardize from rdkit import RDLogger", "molecule block write_to_file = not ( current_line.startswith(\"$DTYPE\") and previous_line.startswith(\"$RFMT\") ) if write_to_file: file_out.write(previous_line)", "to csv file format my_table = my_table.replace(pd.np.nan, \"\", regex=True) # need to remove", "Order: input_file; fixed_file; csv_file \"\"\" file_list_in = [] file_list_ok = [] file_list_csv =", "as well # but doesn't matter since those will be correctly overwritten later", "0 continue # Get Notes if re.match(\".+NOTES\", previous_line) or flag == 6: flag", "= filename, alt. 
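    # Design note on the sanitization above: each mol block is first parsed
    # with sanitize=False so that a malformed entry does not raise inside the
    # parser. A full SanitizeMol() then acts as the try/catch gate (failures
    # are skipped with a printed error); the survivors get their property
    # cache updated, are re-sanitized without the CLEANUP and PROPERTIES ops,
    # and are normalized via rdMolStandardize before the SMILES export.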
    ################################
    # ######### GET single line data ##########
    #
    # Nota bene: this writes the first line of multiline columns as well,
    # but that doesn't matter since those are correctly overwritten later on
    rxn_id = ""
    previous_line = seed_line
    for current_line in open(RDF_OK_FILE):
        # get the reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            continue
        if previous_line.startswith("$DTYPE") and current_line.startswith("$DATUM"):
            current_column = previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
            row_text = current_line.replace("\n", " ")
            my_table.loc[rxn_id, current_column] = row_text.replace("$DATUM ", "")
        previous_line = current_line

    ################################
    # ### Extract Experimental Procedure ###
    # Multiline; both Reaxys and Scifinder
    # flag values:
    # 0 = generic
    # 5 = exp procedure text over multiple lines
    # 9 = skip
    rxn_id = ""
    multiple_row_text = ""
    previous_line = seed_line
    for current_line in open(RDF_OK_FILE):
        # get the reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            flag = 0
            continue
        # get the experimental section
        if SCI_REAX == "RXN:":  # Scifinder
            if re.match(".+EXP_PROC", previous_line) or flag == 5:
                # start of the experimental section; spans over multiple lines
                if re.match(".+EXP_PROC", previous_line):
                    current_column = (
                        previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
                    )
                if re.match(".+NOTES", current_line) or re.match(
                    ".+REFERENCE.+", current_line
                ):
                    # this is the end of the experimental block
                    flag = 9
                    my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
                        "$DATUM ", ""
                    )
                    multiple_row_text = ""
                else:
                    multiple_row_text += current_line.replace("\n", " ")
                    flag = 5
        else:  # Reaxys
            if re.match(".+TXT", previous_line) or flag == 5:
                # start of the experimental section; spans over multiple lines
                if re.match(".+TXT", previous_line):
                    current_column = (
                        previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
                    )
                if re.match(".+STP", current_line):
                    # this is the end of the experimental block
                    flag = 9
                    my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
                        "$DATUM ", ""
                    )
                    multiple_row_text = ""
                else:
                    multiple_row_text += current_line.replace("\n", " ")
                    flag = 5
        previous_line = current_line
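    # The $DTYPE/$DATUM pairing the loops above rely on, sketched with a
    # made-up field name (real exports use vendor-specific ones):
    #
    #   $DTYPE RXN:EXP_PROC            <- column name, SCI_REAX prefix stripped
    #   $DATUM Some procedure text...  <- cell content, "$DATUM " stripped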
\"\"\" ############################################################## # Fix erroneous entries (empty mols) by deleting", "0 # 0 = generic # 7 = title # 9 = skip", "= str(current_line.strip().split(\" \")[2]) flag = 0 continue # Get Notes if re.match(\".+NOTES\", previous_line)", "0 # build the column headers fields = [] for i in range(max_reagents):", "# flag = 1 my_table.loc[rxn_id, current_column] = row_text.replace(\"$DATUM \", \"\") previous_line = current_line", "and doesn't screw anything up) # flag = 0 # 0 = generic", "SCI_REAX == \"RXN:\": if re.match(\".+EXP_PROC\", previous_line) or flag == 5: # start of", "\"\") ) if current_line.startswith(\"$DTYPE\"): # this is the end of Notes block flag", "RDF_CSV_FILE: resulting CSV file (incl. path) Returns: None - output are the new", "re.match(\".+EXP_PROC\", previous_line) or flag == 5: # start of the experimental section. spans", "for current_line in open(RDF_IN_FILE): # prevent first line from being written twice if", "file_list_ok, file_list_csv) # note: zip gets unpacked upon usage and disappears for file_in,", "] = smiles counter_reagents += 1 else: my_table.loc[ rxn_id, my_table.columns[counter_products + max_reagents] ]", "# 9 = skip # 4 = citation rxn_id = \"\" multiple_row_text =", "upcoming loop # seed_line is later reused again with open(RDF_OK_FILE, \"w\") as file_out:", "title ######## # (only Scifinder) # flag = 0 # 0 = generic", "Get Citation if re.match(\".+CITATION\", previous_line) or flag == 4: flag = 4 if", "convert(RDF_IN_FILE: str, RDF_OK_FILE: str, RDF_CSV_FILE: str): \"\"\"original script with single file usage wrapped", "specs. get max no of reagents & products at the same time. my_table,", "directory and subdirectories to scan Returns: zipped List of the new file names", "= \"\" else: multiple_row_text += current_line.replace(\"\\n\", \" \") flag = 4 previous_line =", "from being written twice if current_line.startswith(\"$RDFILE\") and previous_line.startswith( \"$RDFILE\" ): continue # correct", "file_in.readline() previous_line = seed_line # get first line as \"seed\" for upcoming loop", "in_file (str): filename of the corrected file (in principle, the original one would", "Multiline, both, # Reaxys and Scifinder # flag = 0 # 0 =", "import rdMolStandardize from rdkit import RDLogger # Important, or else waaaay too many", "range(num_mols_this_instance): mol_string = \"\".join(molecule[mol]) if mol_string == \"\": smiles = \"\" else: mol", "(optional: returns list of new filenames) @author: <NAME> (DocMinus) license: MIT License Copyright", "Args: RDF_IN_FILE: original input RDF file including path RDF_OK_FILE: new RDF file with", "path RDF_OK_FILE: new RDF file with corrections (if any) RDF_CSV_FILE: resulting CSV file", "= 0 continue # Get Notes if re.match(\".+NOTES\", previous_line) or flag == 6:", "number_reagents + number_products # create fresh list of max no of molecules, for", "\"M END\\n\"): flag = 9 # could just use flag = 0(?) #", "go through it anyway # (less ifs and doesn't screw anything up) #", "my_table.replace(pd.np.nan, \"\", regex=True) # need to remove NaN my_table.drop( list(my_table.filter(regex=\"COPYRIGHT\")), axis=1, inplace=True )", "4 if re.match(\".+CITATION\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if current_line.startswith(\"$DTYPE\"):", "and Scifinder # flag = 0 # 0 = generic # 5 =", "time. 
my_table, max_reagents, max_products = build_empty_table(RDF_OK_FILE, SCI_REAX) #################################################################### # Here comes the actual", "max_products = 0 for line in open(in_file): if line.startswith(\"$RXN\") | flag == 1:", "== 6: flag = 6 # start of the Notes section. might span", "= 0 max_reagents = 0 max_products = 0 for line in open(in_file): if", "# implies: y reactants, z products. x = line.strip().split(\" \") number_reagents = int(x[0])", "range(max_products): tmp_name = \"Product\" + str(i) fields.append(tmp_name) for line in open(in_file): if line.startswith(\"$DTYPE\"):", "if re.match(\".+AUTHOR\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if current_line.startswith(\"$DTYPE\"): #", "(empty) pandas df working table max_reagents (int): number for later positioning of reagents", "# Here comes the actual data extraction and addition to pandas table #", "file in files: if file.endswith((\"rdf\", \"RDF\")): file_list_in.append(os.path.join(subdir, file)) file_list_ok.append( os.path.join(subdir, os.path.splitext(file)[0] + \"_fixed.rdf\")", "can change from file to file(!) for i in range(number_molecules): molecule.append([]) if current_line", "0 = generic # 6 = notes, text potentially over multiple lines #", "# determine max no of reagents/products flag = 0 max_reagents = 0 max_products", "= str(current_line.strip().split(\" \")[2]) flag = 0 continue # Get Title if re.match(\".+TITLE\", previous_line)", "# one could add a return value for better error handling. return None", "######## Extract Notes ######## # (only Scifinder) # flag = 0 # 0", ") mol = rdMolStandardize.Normalize(mol) smiles = rdc.MolToSmiles(mol) # some mols might be empty,", "mol_string == \"\": smiles = \"\" else: mol = rdc.MolFromMolBlock(mol_string, sanitize=False) if mol", "flag = 2 if current_line != \"$MOL\\n\" and (iterate_molecules < number_molecules): molecule[iterate_molecules].append(current_line) if", "molecule block # True write_to_file = current_line.startswith( \"$RXN\" ) and previous_line.startswith(\"$RFMT\") # else", "\"$DATUM \", \"\" ) multiple_row_text = \"\" else: multiple_row_text += current_line.replace(\"\\n\", \" \")", "\"\")) # finally, build the table da_table = pd.DataFrame( index=list_of_IDs, columns=list(OrderedDict.fromkeys(fields)) ) return", "0 = generic # 7 = title # 9 = skip rxn_id =", "file*, can change from file to file(!) for i in range(number_molecules): molecule.append([]) if", "change? # implies: y reactants, z product. 
x = current_line.strip().split(\" \") number_reagents =", "line in open(in_file): if line.startswith(\"$DTYPE\"): fields.append((line.strip().split(\" \")[1]).replace(SCI_REAX, \"\")) # finally, build the table", "sanitizeOps=( rdc.SANITIZE_ALL ^ rdc.SANITIZE_CLEANUP ^ rdc.SANITIZE_PROPERTIES ), ) mol = rdMolStandardize.Normalize(mol) smiles =", "# build the column headers fields = [] for i in range(max_reagents): tmp_name", "new reaction block if current_line.startswith(\"$RXN\") | flag == 1: flag = 1 if", "this is the end of title block flag = 9 my_table.loc[rxn_id, current_column] =", "rdMolStandardize from rdkit import RDLogger # Important, or else waaaay too many RDkit", "molecule = [] number_reagents = 0 number_products = 0 number_molecules = 0 iterate_molecules", "= 5 else: # Reaxys if re.match(\".+TXT\", previous_line) or flag == 5: #", "if mol is None: continue try: rdc.SanitizeMol(mol) except ValueError as _e: print(\"Error: \",", "file including path RDF_OK_FILE: new RDF file with corrections (if any) RDF_CSV_FILE: resulting", "\")[1].replace(SCI_REAX, \"\") ) if current_line.startswith(\"$DTYPE\"): # this is the end of Notes block", "rxn_id = str(current_line.strip().split(\" \")[2]) # flag = 0 continue if previous_line.startswith(\"$DTYPE\") and current_line.startswith(\"$DATUM\"):", "Notes if re.match(\".+NOTES\", previous_line) or flag == 6: flag = 6 # start", "last line is not caught in the loop, hence written out here. #", "current_line ################################ # ######## Extract title ######## # (only Scifinder) # flag =", "# ######### GET single line data ########## # # Nota bene: this will", "& products at the same time. my_table, max_reagents, max_products = build_empty_table(RDF_OK_FILE, SCI_REAX) ####################################################################", "(DocMinus) license: MIT License Copyright (c) 2021 DocMinus \"\"\" import os import re", "file_csv in zipped: print(\"Converting file: \", file_in) convert(file_in, file_ok, file_csv) return zip(file_list_in, file_list_ok,", "if re.match(\".+AUTHOR\", previous_line) or flag == 8: flag = 8 if re.match(\".+AUTHOR\", previous_line):", "axis=1, inplace=True ) # skip the copyright (optional) my_table.to_csv(RDF_CSV_FILE, sep=\"\\t\", header=True, index=True) #", "\"\"\" Chemical RDF converter & fixer. 
Version 2.3 (Dec 28, 14:25:00 2021) Added", "molecules, for use in $MOL block # yes, always same size within a", "'convert' function Args: RDF_IN_FILE: original input RDF file including path RDF_OK_FILE: new RDF", "main table Args: in_file (str): filename of the corrected file: RDF_OK_FILE SCI_REAX (str):", "(only Scifinder) # flag = 0 # 0 = generic # 7 =", "(scifinder) or string \"ROOT:\" (reaxys) \"\"\" f = open(in_file) NUMBER_OF_LINES = 3 for", "os.path.splitext(file)[0] + \".csv\") ) zipped = zip(file_list_in, file_list_ok, file_list_csv) # note: zip gets", "MOLECULES ############# # (structure same for Reaxys and Scifinder) # flag = 0", "else waaaay too many RDkit details in output RDLogger.logger().setLevel(RDLogger.CRITICAL) def fix(RDF_IN: str) ->", "smiles in table max_products (int): <> (products) \"\"\" # get the IDs and", "addition to pandas table # ############### GET MOLECULES ############# # (structure same for", "\" \") # flag = 1 my_table.loc[rxn_id, current_column] = row_text.replace(\"$DATUM \", \"\") previous_line", "the actual data extraction and addition to pandas table # ############### GET MOLECULES", "this is the end of experimental block flag = 9 my_table.loc[rxn_id, current_column] =", "<= number_reagents: my_table.loc[ rxn_id, my_table.columns[counter_reagents] ] = smiles counter_reagents += 1 else: my_table.loc[", "table for export to csv file format my_table = my_table.replace(pd.np.nan, \"\", regex=True) #", "flag = 8 previous_line = current_line ################################ # ### Extract citation (i.e. source)", "independent of source Args: in_file (str): filename of the corrected file (in principle,", "line.startswith(\"$RXN\") | flag == 1: flag = 1 if re.match(\"\\s\\s[0-9]\\s\\s[0-9]\\n\", line): # analyse", "rdkit.Chem as rdc from rdkit.Chem.MolStandardize import rdMolStandardize from rdkit import RDLogger # Important,", "# 0 = generic # 1 = start of reaction block # 2", "= 0 # 0 = generic # 6 = notes, text potentially over", "= scifi_or_reax(RDF_OK_FILE) # build table according to files specs. get max no of", "utf-8 -*- \"\"\" Chemical RDF converter & fixer. Version 2.3 (Dec 28, 14:25:00", "if current_line.startswith(\"$RFMT\"): rxn_id = str(current_line.strip().split(\" \")[2]) flag = 0 continue # get experimental", "note: zip gets unpacked upon usage and disappears for file_in, file_ok, file_csv in", "if Scifinder or Reaxys rdf file (Scifinder contains 'SCHEME' in the enumeration) Returned", "re.match(\".+TITLE\", previous_line) or flag == 7: flag = 7 # start of the", "################################ # ####### Extract authors ######## # (only Scifinder) # flag = 0", "flag = 0 continue # start of a new reaction block if current_line.startswith(\"$RXN\")", "= [] # i.e. 
rows for line in open(in_file): if line.startswith(\"$RFMT\"): list_of_IDs.append(line.strip().split(\" \")[2])", "# but doesn't matter since those will be correctly overwritten later on rxn_id", "multiple_row_text += current_line.replace(\"\\n\", \" \") flag = 8 previous_line = current_line ################################ #", "^ rdc.SANITIZE_CLEANUP ^ rdc.SANITIZE_PROPERTIES ), ) mol = rdMolStandardize.Normalize(mol) smiles = rdc.MolToSmiles(mol) #", "for Reaxys and Scifinder) # flag = 0 # 0 = generic #", "if (current_line == \"$MOL\\n\") | (flag == 2): flag = 2 if current_line", "# (less ifs and doesn't screw anything up) # flag = 0 #", "of reagents smiles in table max_products (int): <> (products) \"\"\" # get the", "= str(current_line.strip().split(\" \")[2]) flag = 0 continue # Get Citation if re.match(\".+CITATION\", previous_line)", "0 # 0 = generic # 8 = authors # 9 = skip", "continue mol.UpdatePropertyCache(strict=False) rdc.SanitizeMol( mol, sanitizeOps=( rdc.SANITIZE_ALL ^ rdc.SANITIZE_CLEANUP ^ rdc.SANITIZE_PROPERTIES ), ) mol", "could just use flag = 0(?) # rebuild the string of a molecule", "into this 'convert' function Args: RDF_IN_FILE: original input RDF file including path RDF_OK_FILE:", "current_line ################################ # ####### Extract authors ######## # (only Scifinder) # flag =", "\"ROOT:\" (reaxys) \"\"\" f = open(in_file) NUMBER_OF_LINES = 3 for i in range(NUMBER_OF_LINES):", "number_products > max_products: max_products = number_products flag = 0 # build the column", "try: rdc.SanitizeMol(mol) except ValueError as _e: print(\"Error: \", _e) continue mol.UpdatePropertyCache(strict=False) rdc.SanitizeMol( mol,", "empty molecule block write_to_file = not ( current_line.startswith(\"$DTYPE\") and previous_line.startswith(\"$RFMT\") ) if write_to_file:", "complete reaction block if current_line.startswith(\"$D\") & (previous_line == \"M END\\n\"): flag = 9", "\"\".join(molecule[mol]) if mol_string == \"\": smiles = \"\" else: mol = rdc.MolFromMolBlock(mol_string, sanitize=False)", "= [] file_list_csv = [] if os.path.isfile(RDF_IN): if RDF_IN.endswith((\"rdf\", \"RDF\")): file_list_in.append(os.path.join(RDF_IN)) file_list_ok.append(os.path.splitext(RDF_IN)[0] +", "waaaay too many RDkit details in output RDLogger.logger().setLevel(RDLogger.CRITICAL) def fix(RDF_IN: str) -> \"zipped\":", "if current_line.startswith(\"$RFMT\"): rxn_id = str(current_line.strip().split(\" \")[2]) # flag = 0 continue if previous_line.startswith(\"$DTYPE\")", "files in a subdirectory recursively. Then submit to conversion (i.e. 
fixing) Parts of", "rdc.SANITIZE_ALL ^ rdc.SANITIZE_CLEANUP ^ rdc.SANITIZE_PROPERTIES ), ) mol = rdMolStandardize.Normalize(mol) smiles = rdc.MolToSmiles(mol)", "first line from being written twice if current_line.startswith(\"$RDFILE\") and previous_line.startswith( \"$RDFILE\" ): continue", "deleting those entries with open(RDF_IN_FILE) as file_in: seed_line = file_in.readline() previous_line = seed_line", "corrected file (in principle, the original one would work as well; alt even", "str(current_line.strip().split(\" \")[2]) flag = 0 continue # get experimental section if SCI_REAX ==", "details in output RDLogger.logger().setLevel(RDLogger.CRITICAL) def fix(RDF_IN: str) -> \"zipped\": \"\"\"Retrieving all .RDF files", "re.match(\".+NOTES\", current_line) or re.match( \".+REFERENCE.+\", current_line ): # this is the end of", "path) (optional: returns list of new filenames) @author: <NAME> (DocMinus) license: MIT License", "file names Order: input_file; fixed_file; csv_file \"\"\" file_list_in = [] file_list_ok = []", "pandas df working table max_reagents (int): number for later positioning of reagents smiles", "= rdMolStandardize.Normalize(mol) smiles = rdc.MolToSmiles(mol) # some mols might be empty, this if/else", "block # yes, always same size within a *given file*, can change from", "a file # not necessary for reaxys, but it will go through it", "df working table max_reagents (int): number for later positioning of reagents smiles in", "5: # start of the experimental section. spans over multiple line if re.match(\".+EXP_PROC\",", "= \"\" else: multiple_row_text += current_line.replace(\"\\n\", \" \") flag = 5 previous_line =", "mol, sanitizeOps=( rdc.SANITIZE_ALL ^ rdc.SANITIZE_CLEANUP ^ rdc.SANITIZE_PROPERTIES ), ) mol = rdMolStandardize.Normalize(mol) smiles", "usage wrapped into this 'convert' function Args: RDF_IN_FILE: original input RDF file including", "== \"\": smiles = \"\" else: mol = rdc.MolFromMolBlock(mol_string, sanitize=False) if mol is", "os.walk(RDF_IN): for file in files: if file.endswith((\"rdf\", \"RDF\")): file_list_in.append(os.path.join(subdir, file)) file_list_ok.append( os.path.join(subdir, os.path.splitext(file)[0]", "continue if previous_line.startswith(\"$DTYPE\") and current_line.startswith(\"$DATUM\"): current_column = previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") row_text = current_line.replace(\"\\n\",", "= 0 # 0 = generic # 9 = skip # 4 =", "for i in range(NUMBER_OF_LINES): line_three = f.readline() return \"RXN:\" if re.match(\".+SCHEME\", line_three) else", "get the IDs and use as row index list_of_IDs = [] # i.e.", "variable possible instead) Returns: SCI_REAX (str): \"RXN:\" (scifinder) or string \"ROOT:\" (reaxys) \"\"\"", "max_products (int): <> (products) \"\"\" # get the IDs and use as row", "############################################################## # Initialize Table and diverse variables # get string replacement variable depending", "= current_line ################################ # ######### GET single line data ########## # # Nota", "except ValueError as _e: print(\"Error: \", _e) continue mol.UpdatePropertyCache(strict=False) rdc.SanitizeMol( mol, sanitizeOps=( rdc.SANITIZE_ALL", "open(in_file): if line.startswith(\"$RXN\") | flag == 1: flag = 1 if re.match(\"\\s\\s[0-9]\\s\\s[0-9]\\n\", line):", "size within a *given file*, can change from file to file(!) 
for i", "Args: in_file (str): filename of the corrected file: RDF_OK_FILE SCI_REAX (str): \"RXN:\" (scifinder)", "but doesn't matter since those will be correctly overwritten later on rxn_id =", "wrapped into this 'convert' function Args: RDF_IN_FILE: original input RDF file including path", "# start of the Notes section. might span over multiple line if re.match(\".+NOTES\",", "_e) continue mol.UpdatePropertyCache(strict=False) rdc.SanitizeMol( mol, sanitizeOps=( rdc.SANITIZE_ALL ^ rdc.SANITIZE_CLEANUP ^ rdc.SANITIZE_PROPERTIES ), )", "0 = generic # 8 = authors # 9 = skip rxn_id =", "= 1 if re.match(\"\\s\\s[0-9]\\s\\s[0-9]\\n\", line): # analyse the \" y z\" line. #", "# ### Extract citation (i.e. source) ### # # This is done last,", "current_line ################################ # ######## Extract Notes ######## # (only Scifinder) # flag =", "= citation rxn_id = \"\" multiple_row_text = \"\" previous_line = seed_line for line", "= rdc.MolFromMolBlock(mol_string, sanitize=False) if mol is None: continue try: rdc.SanitizeMol(mol) except ValueError as", "6: flag = 6 # start of the Notes section. might span over", "Scifinder) # flag = 0 # 0 = generic # 7 = title", "up) # flag = 0 # 0 = generic # 9 = skip", "multiple line if re.match(\".+TITLE\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if", "if re.match(\".+SCHEME\", line_three) else \"ROOT:\" def build_empty_table(in_file: str, SCI_REAX: str): \"\"\"Scans file three", "= smiles counter_reagents += 1 else: my_table.loc[ rxn_id, my_table.columns[counter_products + max_reagents] ] =", "filename, alt. directory and subdirectories to scan Returns: zipped List of the new", "+ max_reagents] ] = smiles counter_products += 1 # reset variables iterate_molecules =", "continue # Get Notes if re.match(\".+NOTES\", previous_line) or flag == 6: flag =", "\"\"\" # get the IDs and use as row index list_of_IDs = []", "SCI_REAX: str): \"\"\"Scans file three times to build a pandas df used as", "current_line.startswith(\"$DTYPE\"): # this is the end of citation block flag = 9 my_table.loc[rxn_id,", "to build a pandas df used as main table Args: in_file (str): filename", "(if any) RDF_CSV_FILE: resulting CSV file (incl. path) Returns: None - output are", "\")[2]) # flag = 0 continue if previous_line.startswith(\"$DTYPE\") and current_line.startswith(\"$DATUM\"): current_column = previous_line.strip().split(\"", "max_products = number_products flag = 0 # build the column headers fields =", "\")[2]) flag = 0 continue # Get Authors if re.match(\".+AUTHOR\", previous_line) or flag", "flag = 0 # 0 = generic # 1 = start of reaction", "analyse the \" y z\" line. # implies: y reactants, z products. x", "y z\" line. # implies: y reactants, z products. x = line.strip().split(\" \")", "just use flag = 0(?) 
# rebuild the string of a molecule counter_reagents", "def scifi_or_reax(in_file: str) -> str: \"\"\"Determine if Scifinder or Reaxys rdf file (Scifinder", "\")[1]).replace(SCI_REAX, \"\")) # finally, build the table da_table = pd.DataFrame( index=list_of_IDs, columns=list(OrderedDict.fromkeys(fields)) )", "always same size within a *given file*, can change from file to file(!)", "matter since those will be correctly overwritten later on rxn_id = \"\" previous_line", "= \"Product\" + str(i) fields.append(tmp_name) for line in open(in_file): if line.startswith(\"$DTYPE\"): fields.append((line.strip().split(\" \")[1]).replace(SCI_REAX,", "else: multiple_row_text += current_line.replace(\"\\n\", \" \") flag = 5 previous_line = current_line ################################", "flag == 7: flag = 7 # start of the Title section. might", "= int(x[0]) number_products = int(x[1]) number_molecules = number_reagents + number_products # create fresh", "flag = 0 continue # get experimental section if SCI_REAX == \"RXN:\": if", "zipped List of the new file names Order: input_file; fixed_file; csv_file \"\"\" file_list_in", "in_file (str): filename of the corrected file: RDF_OK_FILE SCI_REAX (str): \"RXN:\" (scifinder) or", "(molecules) # 9 = skip molecule = [] number_reagents = 0 number_products =", "if re.match(\".+TITLE\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if current_line.startswith(\"$DTYPE\"): #", "generic # 6 = notes, text potentially over multiple lines # 9 =", "flag = 0 continue # Get Authors if re.match(\".+AUTHOR\", previous_line) or flag ==", "max_reagents, max_products ############################################################## # Initialize Table and diverse variables # get string replacement", "if re.match(\".+CITATION\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if current_line.startswith(\"$DTYPE\"): #", "RDF_OK_FILE: str, RDF_CSV_FILE: str): \"\"\"original script with single file usage wrapped into this", "column headers fields = [] for i in range(max_reagents): tmp_name = \"Reagent\" +", "reaction block if current_line.startswith(\"$D\") & (previous_line == \"M END\\n\"): flag = 9 #", "open(RDF_OK_FILE, \"w\") as file_out: write_to_file = True for current_line in open(RDF_IN_FILE): # prevent", "pd.DataFrame( index=list_of_IDs, columns=list(OrderedDict.fromkeys(fields)) ) return da_table, max_reagents, max_products ############################################################## # Initialize Table and", "flag = 0 # 0 = generic # 8 = authors # 9", "original input RDF file including path RDF_OK_FILE: new RDF file with corrections (if", "Returned string is multiple string.replace() methods, to render script independent of source Args:", "data ########## # # Nota bene: this will write first line of multiline", "re import pandas as pd from collections import OrderedDict import rdkit.Chem as rdc", "Initialize Table and diverse variables # get string replacement variable depending on source", "= \"\" else: multiple_row_text += current_line.replace(\"\\n\", \" \") flag = 7 previous_line =", "), ) mol = rdMolStandardize.Normalize(mol) smiles = rdc.MolToSmiles(mol) # some mols might be", "in os.walk(RDF_IN): for file in files: if file.endswith((\"rdf\", \"RDF\")): file_list_in.append(os.path.join(subdir, file)) file_list_ok.append( os.path.join(subdir,", "contains 'SCHEME' in the enumeration) Returned string is multiple string.replace() methods, to render", "= \"\" # get first 
line as \"seed\" for upcoming loop previous_line =", "number_products = 0 number_molecules = 0 iterate_molecules = 0 mol_string = \"\" rxn_id", "experimental section. spans over multiple line if re.match(\".+EXP_PROC\", previous_line): current_column = ( previous_line.strip().split(\"", "\") # flag = 1 my_table.loc[rxn_id, current_column] = row_text.replace(\"$DATUM \", \"\") previous_line =", "line.startswith(\"$RFMT\"): list_of_IDs.append(line.strip().split(\" \")[2]) # determine max no of reagents/products flag = 0 max_reagents", "copyright (optional) my_table.to_csv(RDF_CSV_FILE, sep=\"\\t\", header=True, index=True) # end of script # one could", "\"\" rxn_id = \"\" multiple_row_text = \"\" # get first line as \"seed\"", "as file_in: seed_line = file_in.readline() previous_line = seed_line # get first line as", "0 counter_products = 0 num_mols_this_instance = len(molecule) # should always be max_mol now,", "+ \"_fixed.rdf\") ) file_list_csv.append( os.path.join(subdir, os.path.splitext(file)[0] + \".csv\") ) zipped = zip(file_list_in, file_list_ok,", "originated on Reddit somewhere, forgot where though. Args: RDF_IN = filename, alt. directory", "( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if re.match(\".+STP\", current_line): # this is the end", "if re.match(\"\\s\\s[0-9]\\s\\s[0-9]\\n\", line): # analyse the \" y z\" line. # implies: y", "\"\") row_text = current_line.replace(\"\\n\", \" \") # flag = 1 my_table.loc[rxn_id, current_column] =", "(iterate_molecules < number_molecules): molecule[iterate_molecules].append(current_line) if current_line == \"M END\\n\": iterate_molecules += 1 #", "previous_line = current_line ################################ # ######## Extract Notes ######## # (only Scifinder) #", "\")[1].replace(SCI_REAX, \"\") ) if current_line.startswith(\"$DTYPE\"): # this is the end of title block", "zipped: print(\"Converting file: \", file_in) convert(file_in, file_ok, file_csv) return zip(file_list_in, file_list_ok, file_list_csv) def", "== \"RXN:\": if re.match(\".+EXP_PROC\", previous_line) or flag == 5: # start of the", "necessary for reaxys, but it will go through it anyway # (less ifs", "reaction ID if current_line.startswith(\"$RFMT\"): rxn_id = str(current_line.strip().split(\" \")[2]) flag = 0 continue #", "string.replace() methods, to render script independent of source Args: in_file (str): filename of", "instead) Returns: SCI_REAX (str): \"RXN:\" (scifinder) or string \"ROOT:\" (reaxys) \"\"\" f =", "(object): the (empty) pandas df working table max_reagents (int): number for later positioning", "0 = generic # 9 = skip # 4 = citation rxn_id =", "# True write_to_file = current_line.startswith( \"$RXN\" ) and previous_line.startswith(\"$RFMT\") # else for empty", "the corrected file (in principle, the original one would work as well; alt", "current_line ################################ # ######### GET single line data ########## # # Nota bene:", "(i.e. fixing) Parts of os.walk snippet originated on Reddit somewhere, forgot where though.", "= 8 if re.match(\".+AUTHOR\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if", "(i.e. 
source) ### # # This is done last, since for Scifinder #", "file: RDF_OK_FILE SCI_REAX (str): \"RXN:\" (scifinder) or string \"ROOT:\" (reaxys) used in replacements", "might be empty, this if/else positions reagents/products accordingly if counter_reagents + 1 <=", "2021) Added mol sanitization and try/catch run by calling rdf_fixer.convert(filename or path) (optional:", "data extraction and addition to pandas table # ############### GET MOLECULES ############# #", "return da_table, max_reagents, max_products ############################################################## # Initialize Table and diverse variables # get", "checks for empty lines and the number of molecules lines and skips them", "i.e. rows for line in open(in_file): if line.startswith(\"$RFMT\"): list_of_IDs.append(line.strip().split(\" \")[2]) # determine max", "the end of citation block flag = 9 my_table.loc[rxn_id, current_column] = multiple_row_text.replace( \"$DATUM", "Title if re.match(\".+TITLE\", previous_line) or flag == 7: flag = 7 # start", "of the Notes section. might span over multiple line if re.match(\".+NOTES\", previous_line): current_column", "= int(x[1]) number_molecules = number_reagents + number_products # create fresh list of max", "written out here. # end of fix section #################### def scifi_or_reax(in_file: str) ->", "here. # end of fix section #################### def scifi_or_reax(in_file: str) -> str: \"\"\"Determine", "principle, the original one would work as well; alt even global variable possible", "# some mols might be empty, this if/else positions reagents/products accordingly if counter_reagents", "\"\"\"Scans file three times to build a pandas df used as main table", "later on rxn_id = \"\" previous_line = seed_line for line in open(RDF_OK_FILE): current_line", "previous_line) or flag == 7: flag = 7 # start of the Title", "in files: if file.endswith((\"rdf\", \"RDF\")): file_list_in.append(os.path.join(subdir, file)) file_list_ok.append( os.path.join(subdir, os.path.splitext(file)[0] + \"_fixed.rdf\") )", "# build table according to files specs. get max no of reagents &", "reagents/products accordingly if counter_reagents + 1 <= number_reagents: my_table.loc[ rxn_id, my_table.columns[counter_reagents] ] =", "\"\\n\" or re.match(\"\\s\\s[0-9]\\s\\s[0-9]\\n\", current_line): # checks for empty lines and the number of", "text over multiple lines # 9 = skip rxn_id = \"\" multiple_row_text =", "previous_line = current_line file_out.write(previous_line) # the last line is not caught in the", "variable depending on source SCI_REAX = scifi_or_reax(RDF_OK_FILE) # build table according to files", "== 5: # start of the experimental section. 
spans over multiple line if", "= number_reagents + number_products # create fresh list of max no of molecules,", "# 2 = single MOL (molecules) # 9 = skip molecule = []", "lines and skips them continue # after determining a block, find the molecules", "of a molecule counter_reagents = 0 counter_products = 0 num_mols_this_instance = len(molecule) #", "line if re.match(\".+EXP_PROC\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if re.match(\".+NOTES\",", "-> str: \"\"\"Determine if Scifinder or Reaxys rdf file (Scifinder contains 'SCHEME' in", "of multiline columns as well # but doesn't matter since those will be", "RDF_OK_FILE SCI_REAX (str): \"RXN:\" (scifinder) or string \"ROOT:\" (reaxys) used in replacements Returns:", "reaction block # 2 = single MOL (molecules) # 9 = skip molecule", "# note: zip gets unpacked upon usage and disappears for file_in, file_ok, file_csv", "disappears for file_in, file_ok, file_csv in zipped: print(\"Converting file: \", file_in) convert(file_in, file_ok,", "True for current_line in open(RDF_IN_FILE): # prevent first line from being written twice", "change from file to file(!) for i in range(number_molecules): molecule.append([]) if current_line ==", "# ### Extract Experimental Procedure ### # Multiline, both, # Reaxys and Scifinder", "is the end of experimental block flag = 9 my_table.loc[rxn_id, current_column] = multiple_row_text.replace(", "be empty, this if/else positions reagents/products accordingly if counter_reagents + 1 <= number_reagents:", "= start of reaction block # 2 = single MOL (molecules) # 9", ") multiple_row_text = \"\" else: multiple_row_text += current_line.replace(\"\\n\", \" \") flag = 6", "flag = 0 # 0 = generic # 9 = skip # 4", "<NAME> (DocMinus) license: MIT License Copyright (c) 2021 DocMinus \"\"\" import os import", "rxn_id, my_table.columns[counter_reagents] ] = smiles counter_reagents += 1 else: my_table.loc[ rxn_id, my_table.columns[counter_products +", "0 = generic # 5 = exp procedure text over multiple lines #", "######## Extract title ######## # (only Scifinder) # flag = 0 # 0", "line. # implies: y reactants, z products. x = line.strip().split(\" \") number_reagents =", "in range(max_reagents): tmp_name = \"Reagent\" + str(i) fields.append(tmp_name) for i in range(max_products): tmp_name", "snippet originated on Reddit somewhere, forgot where though. 
Args: RDF_IN = filename, alt.", "if current_line.startswith(\"$RFMT\"): rxn_id = str(current_line.strip().split(\" \")[2]) flag = 0 continue # start of", "molecule.append([]) if current_line == \"\\n\" or re.match(\"\\s\\s[0-9]\\s\\s[0-9]\\n\", current_line): # checks for empty lines", "max_reagents = number_reagents if number_products > max_products: max_products = number_products flag = 0", "\"\" else: multiple_row_text += current_line.replace(\"\\n\", \" \") flag = 4 previous_line = current_line", "string of a molecule counter_reagents = 0 counter_products = 0 num_mols_this_instance = len(molecule)", "previous_line.startswith(\"$RFMT\") # else for empty molecule block write_to_file = not ( current_line.startswith(\"$DTYPE\") and", "if current_line.startswith(\"$RFMT\"): rxn_id = str(current_line.strip().split(\" \")[2]) flag = 0 continue # Get Notes", "find the molecules within the block if (current_line == \"$MOL\\n\") | (flag ==", "mols might be empty, this if/else positions reagents/products accordingly if counter_reagents + 1", "not necessary for reaxys, but it will go through it anyway # (less", "skip the copyright (optional) my_table.to_csv(RDF_CSV_FILE, sep=\"\\t\", header=True, index=True) # end of script #", "single MOL (molecules) # 9 = skip molecule = [] number_reagents = 0", "my_table.loc[rxn_id, current_column] = multiple_row_text.replace( \"$DATUM \", \"\" ) multiple_row_text = \"\" else: multiple_row_text", "# ######## Extract title ######## # (only Scifinder) # flag = 0 #", "seed_line = file_in.readline() previous_line = seed_line # get first line as \"seed\" for", "if current_line.startswith(\"$DTYPE\"): # this is the end of citation block flag = 9", "rxn_id = str(current_line.strip().split(\" \")[2]) flag = 0 continue # start of a new", "(int): number for later positioning of reagents smiles in table max_products (int): <>", "after determining a block, find the molecules within the block if (current_line ==", "Scifinder # flag = 0 # 0 = generic # 5 = exp", "Added mol sanitization and try/catch run by calling rdf_fixer.convert(filename or path) (optional: returns", "of Notes block flag = 9 my_table.loc[rxn_id, current_column] = multiple_row_text.replace( \"$DATUM \", \"\"", "a *given file*, can change from file to file(!) for i in range(number_molecules):", "None - output are the new files. 
\"\"\" ############################################################## # Fix erroneous entries", "str) -> str: \"\"\"Determine if Scifinder or Reaxys rdf file (Scifinder contains 'SCHEME'", "continue # Get Citation if re.match(\".+CITATION\", previous_line) or flag == 4: flag =", "\")[1].replace(SCI_REAX, \"\") ) if current_line.startswith(\"$DTYPE\"): # this is the end of citation block", "index=True) # end of script # one could add a return value for", "\"RXN:\" if re.match(\".+SCHEME\", line_three) else \"ROOT:\" def build_empty_table(in_file: str, SCI_REAX: str): \"\"\"Scans file", "= current_line.replace(\"\\n\", \" \") # flag = 1 my_table.loc[rxn_id, current_column] = row_text.replace(\"$DATUM \",", "= title # 9 = skip rxn_id = \"\" multiple_row_text = \"\" previous_line", "flag = 1 if re.match(\"\\s\\s[0-9]\\s\\s[0-9]\\n\", line): # analyse the \" y z\" line.", "max_reagents] ] = smiles counter_products += 1 # reset variables iterate_molecules = 0", "re.match(\".+TXT\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if re.match(\".+STP\", current_line): #", "section #################### def scifi_or_reax(in_file: str) -> str: \"\"\"Determine if Scifinder or Reaxys rdf", "(optional) my_table.to_csv(RDF_CSV_FILE, sep=\"\\t\", header=True, index=True) # end of script # one could add", "get experimental section if SCI_REAX == \"RXN:\": if re.match(\".+EXP_PROC\", previous_line) or flag ==", "= number_products flag = 0 # build the column headers fields = []", "Experimental Procedure ### # Multiline, both, # Reaxys and Scifinder # flag =", "= 0 max_products = 0 for line in open(in_file): if line.startswith(\"$RXN\") | flag", "= 0 # 0 = generic # 8 = authors # 9 =", "Not hard-coding this since it might change? # implies: y reactants, z product.", "\"w\") as file_out: write_to_file = True for current_line in open(RDF_IN_FILE): # prevent first", "= current_line ################################ # End of file scanning # ############################################ # Finish table", "anyway # (less ifs and doesn't screw anything up) # flag = 0", "if number_products > max_products: max_products = number_products flag = 0 # build the", "8 previous_line = current_line ################################ # ### Extract citation (i.e. source) ### #", "end of author block flag = 9 my_table.loc[rxn_id, current_column] = multiple_row_text.replace( \"$DATUM \",", "files. \"\"\" ############################################################## # Fix erroneous entries (empty mols) by deleting those entries", "= generic # 5 = exp procedure text over multiple lines # 9", "open(RDF_IN_FILE): # prevent first line from being written twice if current_line.startswith(\"$RDFILE\") and previous_line.startswith(", "implies: y reactants, z products. x = line.strip().split(\" \") number_reagents = int(x[0]) number_products", "the experimental section. spans over multiple line if re.match(\".+EXP_PROC\", previous_line): current_column = (", "# analyse the \" y z\" line. 
# implies: y reactants, z products.", "######## # (only Scifinder) # flag = 0 # 0 = generic #", "import rdkit.Chem as rdc from rdkit.Chem.MolStandardize import rdMolStandardize from rdkit import RDLogger #", "of source Args: in_file (str): filename of the corrected file (in principle, the", "previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if re.match(\".+NOTES\", current_line) or re.match(", "0 continue # get experimental section if SCI_REAX == \"RXN:\": if re.match(\".+EXP_PROC\", previous_line)", "flag = 9 my_table.loc[rxn_id, current_column] = multiple_row_text.replace( \"$DATUM \", \"\" ) multiple_row_text =", "write_to_file: file_out.write(previous_line) previous_line = current_line file_out.write(previous_line) # the last line is not caught", "if re.match(\".+TXT\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if re.match(\".+STP\", current_line):", "script # one could add a return value for better error handling. return", "+= current_line.replace(\"\\n\", \" \") flag = 7 previous_line = current_line ################################ # #######", "current_line.replace(\"\\n\", \" \") flag = 7 previous_line = current_line ################################ # ####### Extract", "over multiple line if re.match(\".+TITLE\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") )", "zip gets unpacked upon usage and disappears for file_in, file_ok, file_csv in zipped:", "counter_products += 1 # reset variables iterate_molecules = 0 molecule = [] mol_string", "SCI_REAX = scifi_or_reax(RDF_OK_FILE) # build table according to files specs. get max no", ") and previous_line.startswith(\"$RFMT\") # else for empty molecule block write_to_file = not (", "in open(in_file): if line.startswith(\"$RFMT\"): list_of_IDs.append(line.strip().split(\" \")[2]) # determine max no of reagents/products flag", "rxn_id = str(current_line.strip().split(\" \")[2]) flag = 0 continue # get experimental section if", "csv file format my_table = my_table.replace(pd.np.nan, \"\", regex=True) # need to remove NaN", "+= current_line.replace(\"\\n\", \" \") flag = 4 previous_line = current_line ################################ # End", "str): \"\"\"Scans file three times to build a pandas df used as main", "this will write first line of multiline columns as well # but doesn't", "previous_line) or flag == 4: flag = 4 if re.match(\".+CITATION\", previous_line): current_column =", "format my_table = my_table.replace(pd.np.nan, \"\", regex=True) # need to remove NaN my_table.drop( list(my_table.filter(regex=\"COPYRIGHT\")),", "zip(file_list_in, file_list_ok, file_list_csv) def convert(RDF_IN_FILE: str, RDF_OK_FILE: str, RDF_CSV_FILE: str): \"\"\"original script with", "\" \") flag = 6 previous_line = current_line ################################ # ######## Extract title", "write_to_file = True for current_line in open(RDF_IN_FILE): # prevent first line from being", "print(\"Converting file: \", file_in) convert(file_in, file_ok, file_csv) return zip(file_list_in, file_list_ok, file_list_csv) def convert(RDF_IN_FILE:", "# Get Citation if re.match(\".+CITATION\", previous_line) or flag == 4: flag = 4", "is the end of Notes block flag = 9 my_table.loc[rxn_id, current_column] = multiple_row_text.replace(", "of the new file names Order: input_file; fixed_file; csv_file \"\"\" file_list_in = []", "7 = title # 9 = skip rxn_id = \"\" multiple_row_text = \"\"", "of script # one could add a return 
value for better error handling.", "at the same time. my_table, max_reagents, max_products = build_empty_table(RDF_OK_FILE, SCI_REAX) #################################################################### # Here", "experimental section. spans over multiple line if re.match(\".+TXT\", previous_line): current_column = ( previous_line.strip().split(\"", "fixed_file; csv_file \"\"\" file_list_in = [] file_list_ok = [] file_list_csv = [] if", "os import re import pandas as pd from collections import OrderedDict import rdkit.Chem", "This is done last, since for Scifinder # this is the last entry", "mol is None: continue try: rdc.SanitizeMol(mol) except ValueError as _e: print(\"Error: \", _e)", "( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if current_line.startswith(\"$DTYPE\"): # this is the end of", "rdc.SANITIZE_PROPERTIES ), ) mol = rdMolStandardize.Normalize(mol) smiles = rdc.MolToSmiles(mol) # some mols might", "flag = 6 # start of the Notes section. might span over multiple", "# Reaxys and Scifinder # flag = 0 # 0 = generic #", "\"\"\" import os import re import pandas as pd from collections import OrderedDict", "previous_line = current_line ################################ # ######### GET single line data ########## # #", "and previous_line.startswith(\"$RFMT\") # else for empty molecule block write_to_file = not ( current_line.startswith(\"$DTYPE\")", "= 1 if re.match(\"\\s\\s[0-9]\\s\\s[0-9]\\n\", current_line): # analyse the \" y z\" line. Not", "re.match(\".+CITATION\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if current_line.startswith(\"$DTYPE\"): # this", "\", \"\" ) multiple_row_text = \"\" else: multiple_row_text += current_line.replace(\"\\n\", \" \") flag", "max_reagents, max_products = build_empty_table(RDF_OK_FILE, SCI_REAX) #################################################################### # Here comes the actual data extraction", "continue # correct molecule block # True write_to_file = current_line.startswith( \"$RXN\" ) and", "<> (products) \"\"\" # get the IDs and use as row index list_of_IDs", "re.match( \".+REFERENCE.+\", current_line ): # this is the end of experimental block flag", "= \"\" multiple_row_text = \"\" # get first line as \"seed\" for upcoming", "iterate_molecules = 0 mol_string = \"\" rxn_id = \"\" multiple_row_text = \"\" #", "twice if current_line.startswith(\"$RDFILE\") and previous_line.startswith( \"$RDFILE\" ): continue # correct molecule block #", "\"RDF\")): file_list_in.append(os.path.join(RDF_IN)) file_list_ok.append(os.path.splitext(RDF_IN)[0] + \"_fixed.rdf\") file_list_csv.append(os.path.splitext(RDF_IN)[0] + \".csv\") elif os.path.isdir(RDF_IN): for subdir, dirs,", "Returns: None - output are the new files. 
\"\"\" ############################################################## # Fix erroneous", "in range(NUMBER_OF_LINES): line_three = f.readline() return \"RXN:\" if re.match(\".+SCHEME\", line_three) else \"ROOT:\" def", "int(x[1]) number_molecules = number_reagents + number_products # create fresh list of max no", "molecules lines and skips them continue # after determining a block, find the", "again with open(RDF_OK_FILE, \"w\") as file_out: write_to_file = True for current_line in open(RDF_IN_FILE):", "number_products flag = 0 # build the column headers fields = [] for", "\"\" else: multiple_row_text += current_line.replace(\"\\n\", \" \") flag = 6 previous_line = current_line", "in open(in_file): if line.startswith(\"$RXN\") | flag == 1: flag = 1 if re.match(\"\\s\\s[0-9]\\s\\s[0-9]\\n\",", "tmp_name = \"Product\" + str(i) fields.append(tmp_name) for line in open(in_file): if line.startswith(\"$DTYPE\"): fields.append((line.strip().split(\"", "flag == 4: flag = 4 if re.match(\".+CITATION\", previous_line): current_column = ( previous_line.strip().split(\"", "positioning of reagents smiles in table max_products (int): <> (products) \"\"\" # get", "0 continue # Get Citation if re.match(\".+CITATION\", previous_line) or flag == 4: flag", "== 8: flag = 8 if re.match(\".+AUTHOR\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX,", "for file_in, file_ok, file_csv in zipped: print(\"Converting file: \", file_in) convert(file_in, file_ok, file_csv)", "generic # 1 = start of reaction block # 2 = single MOL", "molecule[iterate_molecules].append(current_line) if current_line == \"M END\\n\": iterate_molecules += 1 # end of the", "z products. x = line.strip().split(\" \") number_reagents = int(x[0]) number_products = int(x[1]) if", "file with corrections (if any) RDF_CSV_FILE: resulting CSV file (incl. path) Returns: None", "entries (empty mols) by deleting those entries with open(RDF_IN_FILE) as file_in: seed_line =", "8 = authors # 9 = skip rxn_id = \"\" multiple_row_text = \"\"", "### Extract citation (i.e. 
source) ### # # This is done last, since", "my_table.loc[rxn_id, current_column] = row_text.replace(\"$DATUM \", \"\") previous_line = current_line ################################ # ### Extract", "flag = 0 max_reagents = 0 max_products = 0 for line in open(in_file):", "\") number_reagents = int(x[0]) number_products = int(x[1]) number_molecules = number_reagents + number_products #", "from collections import OrderedDict import rdkit.Chem as rdc from rdkit.Chem.MolStandardize import rdMolStandardize from", "corrected file: RDF_OK_FILE SCI_REAX (str): \"RXN:\" (scifinder) or string \"ROOT:\" (reaxys) used in", "\" \") flag = 5 previous_line = current_line ################################ # ######## Extract Notes", "= line.strip().split(\" \") number_reagents = int(x[0]) number_products = int(x[1]) if number_reagents > max_reagents:", "a pandas df used as main table Args: in_file (str): filename of the", "open(in_file) NUMBER_OF_LINES = 3 for i in range(NUMBER_OF_LINES): line_three = f.readline() return \"RXN:\"", "Extract authors ######## # (only Scifinder) # flag = 0 # 0 =", "of title block flag = 9 my_table.loc[rxn_id, current_column] = multiple_row_text.replace( \"$DATUM \", \"\"", "for i in range(max_reagents): tmp_name = \"Reagent\" + str(i) fields.append(tmp_name) for i in", "# ############################################ # Finish table for export to csv file format my_table =", "(str): \"RXN:\" (scifinder) or string \"ROOT:\" (reaxys) \"\"\" f = open(in_file) NUMBER_OF_LINES =", "rebuild the string of a molecule counter_reagents = 0 counter_products = 0 num_mols_this_instance", "(flag == 2): flag = 2 if current_line != \"$MOL\\n\" and (iterate_molecules <", "\"RXN:\" (scifinder) or string \"ROOT:\" (reaxys) \"\"\" f = open(in_file) NUMBER_OF_LINES = 3", "section if SCI_REAX == \"RXN:\": if re.match(\".+EXP_PROC\", previous_line) or flag == 5: #", "= \"\" else: multiple_row_text += current_line.replace(\"\\n\", \" \") flag = 8 previous_line =", "\"\") ) if current_line.startswith(\"$DTYPE\"): # this is the end of citation block flag", "write first line of multiline columns as well # but doesn't matter since", "RDkit details in output RDLogger.logger().setLevel(RDLogger.CRITICAL) def fix(RDF_IN: str) -> \"zipped\": \"\"\"Retrieving all .RDF", "of a new reaction block if current_line.startswith(\"$RXN\") | flag == 1: flag =", "if re.match(\".+EXP_PROC\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\") ) if re.match(\".+NOTES\", current_line)", "as \"seed\" for upcoming loop previous_line = seed_line for line in open(RDF_OK_FILE): current_line", "if mol_string == \"\": smiles = \"\" else: mol = rdc.MolFromMolBlock(mol_string, sanitize=False) if", "(reaxys) \"\"\" f = open(in_file) NUMBER_OF_LINES = 3 for i in range(NUMBER_OF_LINES): line_three", "multiple_row_text = \"\" else: multiple_row_text += current_line.replace(\"\\n\", \" \") flag = 5 previous_line", "mol_string = \"\" previous_line = current_line ################################ # ######### GET single line data", "generic # 7 = title # 9 = skip rxn_id = \"\" multiple_row_text", "previous_line) or flag == 5: # start of the experimental section. 
spans over", "import OrderedDict import rdkit.Chem as rdc from rdkit.Chem.MolStandardize import rdMolStandardize from rdkit import", "x = line.strip().split(\" \") number_reagents = int(x[0]) number_products = int(x[1]) if number_reagents >", "for upcoming loop previous_line = seed_line for line in open(RDF_OK_FILE): current_line = line", "\") flag = 5 previous_line = current_line ################################ # ######## Extract Notes ########", "#################################################################### # Here comes the actual data extraction and addition to pandas table", "\"\", regex=True) # need to remove NaN my_table.drop( list(my_table.filter(regex=\"COPYRIGHT\")), axis=1, inplace=True ) #", "build_empty_table(in_file: str, SCI_REAX: str): \"\"\"Scans file three times to build a pandas df", "but it will go through it anyway # (less ifs and doesn't screw", "import pandas as pd from collections import OrderedDict import rdkit.Chem as rdc from", "build table according to files specs. get max no of reagents & products", "in replacements Returns: da_table (object): the (empty) pandas df working table max_reagents (int):", "# flag = 0 # 0 = generic # 5 = exp procedure", "notes, text potentially over multiple lines # 9 = skip rxn_id = \"\"", "0 number_products = 0 number_molecules = 0 iterate_molecules = 0 mol_string = \"\"", "Here comes the actual data extraction and addition to pandas table # ###############", "da_table, max_reagents, max_products ############################################################## # Initialize Table and diverse variables # get string", "\"\"\" ############################################################## # Fix erroneous entries (empty mols) by deleting those entries with", "if current_line.startswith(\"$RFMT\"): rxn_id = str(current_line.strip().split(\" \")[2]) flag = 0 continue # Get Title", "= notes, text potentially over multiple lines # 9 = skip rxn_id =", "not ( current_line.startswith(\"$DTYPE\") and previous_line.startswith(\"$RFMT\") ) if write_to_file: file_out.write(previous_line) previous_line = current_line file_out.write(previous_line)", "= 6 # start of the Notes section. might span over multiple line", "= 0 continue # Get Title if re.match(\".+TITLE\", previous_line) or flag == 7:", "def fix(RDF_IN: str) -> \"zipped\": \"\"\"Retrieving all .RDF files in a subdirectory recursively.", "bene: this will write first line of multiline columns as well # but", "8: flag = 8 if re.match(\".+AUTHOR\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\")", "span over multiple line if re.match(\".+TITLE\", previous_line): current_column = ( previous_line.strip().split(\" \")[1].replace(SCI_REAX, \"\")", "+ number_products # create fresh list of max no of molecules, for use", "# else for empty molecule block write_to_file = not ( current_line.startswith(\"$DTYPE\") and previous_line.startswith(\"$RFMT\")", ") multiple_row_text = \"\" else: multiple_row_text += current_line.replace(\"\\n\", \" \") flag = 8", "current_line): # analyse the \" y z\" line. 
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Chemical RDF converter & fixer.
Version 2.3 (Dec 28, 14:25:00 2021)
Added mol sanitization and try/catch.
Run by calling rdf_fixer.convert(filename or path)
(optional: returns list of new filenames)

@author: <NAME> (DocMinus)
license: MIT License
Copyright (c) 2021 DocMinus
"""

import os
import re
from collections import OrderedDict

import pandas as pd
import rdkit.Chem as rdc
from rdkit import RDLogger
from rdkit.Chem.MolStandardize import rdMolStandardize

# Important, or else waaaay too many RDkit details in output
RDLogger.logger().setLevel(RDLogger.CRITICAL)


def fix(RDF_IN: str) -> "zipped":
    """Retrieve all .RDF files in a subdirectory recursively,
    then submit to conversion (i.e. fixing).
    Parts of os.walk snippet originated on Reddit somewhere, forgot where though.

    Args:
        RDF_IN: filename, alt. directory and subdirectories to scan
    Returns:
        zipped list of the new file names
        Order: input_file; fixed_file; csv_file
    """
    file_list_in = []
    file_list_ok = []
    file_list_csv = []
    if os.path.isfile(RDF_IN):
        if RDF_IN.endswith(("rdf", "RDF")):
            file_list_in.append(os.path.join(RDF_IN))
            file_list_ok.append(os.path.splitext(RDF_IN)[0] + "_fixed.rdf")
            file_list_csv.append(os.path.splitext(RDF_IN)[0] + ".csv")
    elif os.path.isdir(RDF_IN):
        for subdir, dirs, files in os.walk(RDF_IN):
            for file in files:
                if file.endswith(("rdf", "RDF")):
                    file_list_in.append(os.path.join(subdir, file))
                    file_list_ok.append(
                        os.path.join(subdir, os.path.splitext(file)[0] + "_fixed.rdf")
                    )
                    file_list_csv.append(
                        os.path.join(subdir, os.path.splitext(file)[0] + ".csv")
                    )

    zipped = zip(file_list_in, file_list_ok, file_list_csv)
    # note: zip gets unpacked upon usage and disappears
    for file_in, file_ok, file_csv in zipped:
        print("Converting file: ", file_in)
        convert(file_in, file_ok, file_csv)

    return zip(file_list_in, file_list_ok, file_list_csv)


def convert(RDF_IN_FILE: str, RDF_OK_FILE: str, RDF_CSV_FILE: str):
    """Original script with single file usage wrapped into this 'convert' function.

    Args:
        RDF_IN_FILE: original input RDF file including path
        RDF_OK_FILE: new RDF file with corrections (if any)
        RDF_CSV_FILE: resulting CSV file (incl. path)
    Returns:
        None - output are the new files.
    """
    ##############################################################
    # Fix erroneous entries (empty mols) by deleting those entries
    with open(RDF_IN_FILE) as file_in:
        seed_line = file_in.readline()
        previous_line = seed_line
        # get first line as "seed" for upcoming loop
        # seed_line is later reused again
    with open(RDF_OK_FILE, "w") as file_out:
        write_to_file = True
        for current_line in open(RDF_IN_FILE):
            # prevent first line from being written twice
            if current_line.startswith("$RDFILE") and previous_line.startswith(
                "$RDFILE"
            ):
                continue
            # correct molecule block -> True
            # (this value is immediately superseded by the next assignment)
            write_to_file = current_line.startswith(
                "$RXN"
            ) and previous_line.startswith("$RFMT")
            # else for empty molecule block
            write_to_file = not (
                current_line.startswith("$DTYPE") and previous_line.startswith("$RFMT")
            )
            if write_to_file:
                file_out.write(previous_line)
            previous_line = current_line
        file_out.write(previous_line)
        # the last line is not caught in the loop, hence written out here.
    # end of fix section
    ####################

    def scifi_or_reax(in_file: str) -> str:
        """Determine if Scifinder or Reaxys rdf file
        (Scifinder contains 'SCHEME' in the enumeration).
        Returned string is used in multiple string.replace() methods,
        to render the script independent of source.

        Args:
            in_file (str): filename of the corrected file (in principle, the original
                one would work as well; alt. even a global variable possible instead)
        Returns:
            SCI_REAX (str): "RXN:" (scifinder) or string "ROOT:" (reaxys)
                used in replacements
        """
        with open(in_file) as f:
            NUMBER_OF_LINES = 3
            for i in range(NUMBER_OF_LINES):
                line_three = f.readline()
        return "RXN:" if re.match(".+SCHEME", line_three) else "ROOT:"

    def build_empty_table(in_file: str, SCI_REAX: str):
        """Scans file three times to build a pandas df used as main table.

        Args:
            in_file (str): filename of the corrected file
            SCI_REAX (str): "RXN:" (scifinder) or string "ROOT:" (reaxys)
                used in replacements
        Returns:
            da_table (object): the (empty) pandas df working table
            max_reagents (int): number for later positioning of reagents smiles in table
            max_products (int): <> (products)
        """
        # get the IDs and use as row index
        list_of_IDs = []  # i.e. rows
        for line in open(in_file):
            if line.startswith("$RFMT"):
                list_of_IDs.append(line.strip().split(" ")[2])

        # determine max no of reagents/products
        flag = 0
        max_reagents = 0
        max_products = 0
        for line in open(in_file):
            if line.startswith("$RXN") | flag == 1:
                flag = 1
                if re.match("\s\s[0-9]\s\s[0-9]\n", line):
                    # analyse the "  y  z" line.
                    # implies: y reactants, z products.
                    x = line.strip().split(" ")
                    number_reagents = int(x[0])
                    number_products = int(x[1])
                    if number_reagents > max_reagents:
                        max_reagents = number_reagents
                    if number_products > max_products:
                        max_products = number_products
                    flag = 0

        # build the column headers
        fields = []
        for i in range(max_reagents):
            tmp_name = "Reagent" + str(i)
            fields.append(tmp_name)
        for i in range(max_products):
            tmp_name = "Product" + str(i)
            fields.append(tmp_name)
        for line in open(in_file):
            if line.startswith("$DTYPE"):
                fields.append((line.strip().split(" ")[1]).replace(SCI_REAX, ""))

        # finally, build the table
        da_table = pd.DataFrame(
            index=list_of_IDs, columns=list(OrderedDict.fromkeys(fields))
        )
        return da_table, max_reagents, max_products
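    # --- Illustrative note (added; the values are hypothetical) ---
    # For a file whose largest reaction has 2 reagents and 1 product and which
    # declares $DTYPE fields such as RXN:VARIATION(1):TITLE, build_empty_table()
    # above would yield columns roughly like:
    #   Reagent0 | Reagent1 | Product0 | VARIATION(1):TITLE | ...
    # with one row per $RFMT reaction ID and all cells initially NaN.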
    ##############################################################
    # Initialize Table and diverse variables
    # get string replacement variable depending on source
    SCI_REAX = scifi_or_reax(RDF_OK_FILE)
    # build table according to files specs.
    # get max no of reagents & products at the same time.
    my_table, max_reagents, max_products = build_empty_table(RDF_OK_FILE, SCI_REAX)

    ####################################################################
    # Here comes the actual data extraction and addition to pandas table
    #
    # ############### GET MOLECULES #############
    # (structure same for Reaxys and Scifinder)
    # flag values:
    # 0 = generic
    # 1 = start of reaction block
    # 2 = single MOL (molecules)
    # 9 = skip
    flag = 0
    molecule = []
    number_reagents = 0
    number_products = 0
    number_molecules = 0
    iterate_molecules = 0
    mol_string = ""
    rxn_id = ""
    multiple_row_text = ""
    # get first line as "seed" for upcoming loop
    previous_line = seed_line
    for line in open(RDF_OK_FILE):
        current_line = line
        # get reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            flag = 0
            continue
        # start of a new reaction block
        if current_line.startswith("$RXN") | flag == 1:
            flag = 1
            if re.match("\s\s[0-9]\s\s[0-9]\n", current_line):
                # analyse the "  y  z" line. Not hard-coding this since it might change?
                # implies: y reactants, z products.
                x = current_line.strip().split(" ")
                number_reagents = int(x[0])
                number_products = int(x[1])
                number_molecules = number_reagents + number_products
                # create fresh list of max no of molecules, for use in $MOL block
                # yes, always same size within a *given file*,
                # can change from file to file(!)
                for i in range(number_molecules):
                    molecule.append([])
        if current_line == "\n" or re.match("\s\s[0-9]\s\s[0-9]\n", current_line):
            # checks for empty lines and the number of molecules lines and skips them
            continue
        # after determining a block, find the molecules within the block
        if (current_line == "$MOL\n") | (flag == 2):
            flag = 2
            if current_line != "$MOL\n" and (iterate_molecules < number_molecules):
                molecule[iterate_molecules].append(current_line)
            if current_line == "M END\n":
                iterate_molecules += 1
        # end of the complete reaction block
        if current_line.startswith("$D") & (previous_line == "M END\n"):
            flag = 9  # could just use flag = 0(?)
            # rebuild the string of a molecule
            counter_reagents = 0
            counter_products = 0
            num_mols_this_instance = len(molecule)
            # should always be max_mol now, so doesn't matter
            for mol in range(num_mols_this_instance):
                mol_string = "".join(molecule[mol])
                if mol_string == "":
                    smiles = ""
                else:
                    mol = rdc.MolFromMolBlock(mol_string, sanitize=False)
                    if mol is None:
                        continue
                    try:
                        rdc.SanitizeMol(mol)
                    except ValueError as _e:
                        print("Error: ", _e)
                        continue
                    mol.UpdatePropertyCache(strict=False)
                    rdc.SanitizeMol(
                        mol,
                        sanitizeOps=(
                            rdc.SANITIZE_ALL
                            ^ rdc.SANITIZE_CLEANUP
                            ^ rdc.SANITIZE_PROPERTIES
                        ),
                    )
                    mol = rdMolStandardize.Normalize(mol)
                    smiles = rdc.MolToSmiles(mol)
                # some mols might be empty; this if/else positions
                # reagents/products accordingly
                if counter_reagents + 1 <= number_reagents:
                    my_table.loc[rxn_id, my_table.columns[counter_reagents]] = smiles
                    counter_reagents += 1
                else:
                    my_table.loc[
                        rxn_id, my_table.columns[counter_products + max_reagents]
                    ] = smiles
                    counter_products += 1
            # reset variables
            iterate_molecules = 0
            molecule = []
            mol_string = ""
        previous_line = current_line

    ################################
    # ######### GET single line data ##########
    #
    # Nota bene: this will write first line of multiline columns as well,
    # but doesn't matter since those will be correctly overwritten later on
    rxn_id = ""
    previous_line = seed_line
    for line in open(RDF_OK_FILE):
        current_line = line
        # get reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            # flag = 0
            continue
        if previous_line.startswith("$DTYPE") and current_line.startswith("$DATUM"):
            current_column = previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
            row_text = current_line.replace("\n", " ")
            # flag = 1
            my_table.loc[rxn_id, current_column] = row_text.replace("$DATUM ", "")
        previous_line = current_line

    ################################
    # ### Extract Experimental Procedure ###
    # Multiline, both Reaxys and Scifinder
    # flag values:
    # 0 = generic
    # 5 = exp procedure text over multiple lines
    # 9 = skip
    rxn_id = ""
    multiple_row_text = ""
    previous_line = seed_line
    for line in open(RDF_OK_FILE):
        current_line = line
        # get reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            flag = 0
            continue
        # get experimental section
        if SCI_REAX == "RXN:":
            # Scifinder
            if re.match(".+EXP_PROC", previous_line) or flag == 5:
                # start of the experimental section. spans over multiple lines
                if re.match(".+EXP_PROC", previous_line):
                    current_column = (
                        previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
                    )
                if re.match(".+STP", current_line):
                    # this is the end of experimental block
                    flag = 9
                    my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
                        "$DATUM ", ""
                    )
                    multiple_row_text = ""
                else:
                    multiple_row_text += current_line.replace("\n", " ")
                    flag = 5
        else:
            # Reaxys
            if re.match(".+TXT", previous_line) or flag == 5:
                # start of the experimental section. spans over multiple lines
                if re.match(".+TXT", previous_line):
                    current_column = (
                        previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
                    )
                if re.match(".+NOTES", current_line) or re.match(
                    ".+REFERENCE.+", current_line
                ):
                    # this is the end of experimental block
                    flag = 9
                    my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
                        "$DATUM ", ""
                    )
                    multiple_row_text = ""
                else:
                    multiple_row_text += current_line.replace("\n", " ")
                    flag = 5
        previous_line = current_line

    ################################
    # ######## Extract Notes ########
    # (only Scifinder)
    # flag values:
    # 0 = generic
    # 6 = notes, text potentially over multiple lines
    # 9 = skip
    rxn_id = ""
    multiple_row_text = ""
    previous_line = seed_line
    for line in open(RDF_OK_FILE):
        current_line = line
        # get reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            flag = 0
            continue
        # Get Notes
        if re.match(".+NOTES", previous_line) or flag == 6:
            flag = 6
            # start of the Notes section. might span over multiple lines
            if re.match(".+NOTES", previous_line):
                current_column = (
                    previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
                )
            if current_line.startswith("$DTYPE"):
                # this is the end of Notes block
                flag = 9
                my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
                    "$DATUM ", ""
                )
                multiple_row_text = ""
            else:
                multiple_row_text += current_line.replace("\n", " ")
                flag = 6
        previous_line = current_line

    ################################
    # ######## Extract title ########
    # (only Scifinder)
    # flag values:
    # 0 = generic
    # 7 = title
    # 9 = skip
    rxn_id = ""
    multiple_row_text = ""
    previous_line = seed_line
    for line in open(RDF_OK_FILE):
        current_line = line
        # get reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            flag = 0
            continue
        # Get Title
        if re.match(".+TITLE", previous_line) or flag == 7:
            flag = 7
            # start of the Title section. might span over multiple lines
            if re.match(".+TITLE", previous_line):
                current_column = (
                    previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
                )
            if current_line.startswith("$DTYPE"):
                # this is the end of title block
                flag = 9
                my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
                    "$DATUM ", ""
                )
                multiple_row_text = ""
            else:
                multiple_row_text += current_line.replace("\n", " ")
                flag = 7
        previous_line = current_line

    ################################
    # ####### Extract authors ########
    # (only Scifinder)
    # flag values:
    # 0 = generic
    # 8 = authors
    # 9 = skip
    rxn_id = ""
    multiple_row_text = ""
    previous_line = seed_line
    for line in open(RDF_OK_FILE):
        current_line = line
        # get reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            flag = 0
            continue
        # Get Authors
        if re.match(".+AUTHOR", previous_line) or flag == 8:
            flag = 8
            if re.match(".+AUTHOR", previous_line):
                current_column = (
                    previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
                )
            if current_line.startswith("$DTYPE"):
                # this is the end of author block
                flag = 9
                my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
                    "$DATUM ", ""
                )
                multiple_row_text = ""
            else:
                multiple_row_text += current_line.replace("\n", " ")
                flag = 8
        previous_line = current_line

    ################################
    # ### Extract citation (i.e. source) ###
    #
    # This is done last, since for Scifinder this is the last entry in a file.
    # Not necessary for reaxys, but it will go through this loop anyway
    # (less ifs and doesn't screw anything up)
    # flag values:
    # 0 = generic
    # 4 = citation
    # 9 = skip
    rxn_id = ""
    multiple_row_text = ""
    previous_line = seed_line
    for line in open(RDF_OK_FILE):
        current_line = line
        # get reaction ID
        if current_line.startswith("$RFMT"):
            rxn_id = str(current_line.strip().split(" ")[2])
            flag = 0
            continue
        # Get Citation
        if re.match(".+CITATION", previous_line) or flag == 4:
            flag = 4
            if re.match(".+CITATION", previous_line):
                current_column = (
                    previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
                )
            if current_line.startswith("$DTYPE"):
                # this is the end of citation block
                flag = 9
                my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
                    "$DATUM ", ""
                )
                multiple_row_text = ""
            else:
                multiple_row_text += current_line.replace("\n", " ")
                flag = 4
        previous_line = current_line

    ############################################
    # End of file scanning
    # Finish table for export to csv file format
    my_table = my_table.replace(pd.np.nan, "", regex=True)  # need to remove NaN
    my_table.drop(
        list(my_table.filter(regex="COPYRIGHT")), axis=1, inplace=True
    )  # skip the copyright (optional)
    my_table.to_csv(RDF_CSV_FILE, sep="\t", header=True, index=True)
    # end of script
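# --- Usage sketch (added; not part of the original script) ---
# A minimal example of how the fix() entry point above might be driven.
# The "data/" directory is hypothetical; the loop only illustrates the
# (input_file, fixed_file, csv_file) triples that fix() returns.
if __name__ == "__main__":
    for file_in, file_ok, file_csv in fix("data/"):
        print(file_in, "->", file_ok, "and", file_csv)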
[ "= f.read() f.close() return text except OSError: print(\"Session File not found\") return \"00000\"", "machine.reset() def help1(): print(\"This is a WIP help menu\") print(\"Some description\") # Reads", "def reboot(): machine.reset() def help1(): print(\"This is a WIP help menu\") print(\"Some description\")", "print(\"Entering Edit Mode gracefully\") import editMode editMode.main() else: print(\"Entering Work Mode\") import workMode", "= open(SESSION_FILENAME, 'w') f.write(str(bootMode)) f.write(str(param1)) f.write(str(param2)) f.close() def getCrashReport(): import sys sys.print_exception(exceptionMessage, sys.stderr)", "import sys sys.print_exception(exceptionMessage, sys.stderr) # Decide whether to go into workMode or editMode", "def getCrashReport(): import sys sys.print_exception(exceptionMessage, sys.stderr) # Decide whether to go into workMode", "<gh_stars>0 # This file is executed on every boot (including wake-boot from deepsleep)", "(including wake-boot from deepsleep) import esp import machine esp.osdebug(None) SESSION_FILENAME = 'SessionData.txt' exceptionMessage", "f.write(str(param1)) f.write(str(param2)) f.close() def getCrashReport(): import sys sys.print_exception(exceptionMessage, sys.stderr) # Decide whether to", "data about the light state before the restart, used after restart def readSession():", "workMode.main() machine.reset() except Exception as exc: global exceptionMessage exceptionMessage = exc import triac", "return text except OSError: print(\"Session File not found\") return \"00000\" # Saves data", "int(text[0]) == 0: print(\"Entering Edit Mode gracefully\") import editMode editMode.main() else: print(\"Entering Work", "wake-boot from deepsleep) import esp import machine esp.osdebug(None) SESSION_FILENAME = 'SessionData.txt' exceptionMessage =", "WIP help menu\") print(\"Some description\") # Reads data about the light state before", "import machine esp.osdebug(None) SESSION_FILENAME = 'SessionData.txt' exceptionMessage = None def reboot(): machine.reset() def", "restart, used after restart def readSession(): try: f = open(SESSION_FILENAME, 'r') text =", "workMode workMode.main() machine.reset() except Exception as exc: global exceptionMessage exceptionMessage = exc import", "try: f = open(SESSION_FILENAME, 'r') text = f.read() f.close() return text except OSError:", "a WIP help menu\") print(\"Some description\") # Reads data about the light state", "exceptionMessage = None def reboot(): machine.reset() def help1(): print(\"This is a WIP help", "Reads data about the light state before the restart, used after restart def", "esp.osdebug(None) SESSION_FILENAME = 'SessionData.txt' exceptionMessage = None def reboot(): machine.reset() def help1(): print(\"This", "f.read() f.close() return text except OSError: print(\"Session File not found\") return \"00000\" #", "the restart, used after restart def readSession(): try: f = open(SESSION_FILENAME, 'r') text", "text = readSession() if int(text[0]) == 0: print(\"Entering Edit Mode gracefully\") import editMode", "Work Mode\") import workMode workMode.main() machine.reset() except Exception as exc: global exceptionMessage exceptionMessage", "sys sys.print_exception(exceptionMessage, sys.stderr) # Decide whether to go into workMode or editMode def", "else: print(\"Entering Work Mode\") import workMode workMode.main() machine.reset() except Exception as exc: global", "found\") return \"00000\" # Saves data about the light state before restarting def", "def readSession(): try: f = open(SESSION_FILENAME, 'r') text = 
f.read() f.close() return text", "# Decide whether to go into workMode or editMode def main(): import gc", "as exc: global exceptionMessage exceptionMessage = exc import triac triac.activate(0) import editMode editMode.main()", "text = f.read() f.close() return text except OSError: print(\"Session File not found\") return", "after restart def readSession(): try: f = open(SESSION_FILENAME, 'r') text = f.read() f.close()", "= None def reboot(): machine.reset() def help1(): print(\"This is a WIP help menu\")", "is a WIP help menu\") print(\"Some description\") # Reads data about the light", "print(\"Some description\") # Reads data about the light state before the restart, used", "== 0: print(\"Entering Edit Mode gracefully\") import editMode editMode.main() else: print(\"Entering Work Mode\")", "except Exception as exc: global exceptionMessage exceptionMessage = exc import triac triac.activate(0) import", "state before the restart, used after restart def readSession(): try: f = open(SESSION_FILENAME,", "state before restarting def saveSession(bootMode, param1, param2): f = open(SESSION_FILENAME, 'w') f.write(str(bootMode)) f.write(str(param1))", "OSError: print(\"Session File not found\") return \"00000\" # Saves data about the light", "readSession() if int(text[0]) == 0: print(\"Entering Edit Mode gracefully\") import editMode editMode.main() else:", "f = open(SESSION_FILENAME, 'w') f.write(str(bootMode)) f.write(str(param1)) f.write(str(param2)) f.close() def getCrashReport(): import sys sys.print_exception(exceptionMessage,", "from deepsleep) import esp import machine esp.osdebug(None) SESSION_FILENAME = 'SessionData.txt' exceptionMessage = None", "gc.collect() try: text = readSession() if int(text[0]) == 0: print(\"Entering Edit Mode gracefully\")", "import esp import machine esp.osdebug(None) SESSION_FILENAME = 'SessionData.txt' exceptionMessage = None def reboot():", "exc: global exceptionMessage exceptionMessage = exc import triac triac.activate(0) import editMode editMode.main() if", "help1(): print(\"This is a WIP help menu\") print(\"Some description\") # Reads data about", "cm.setWifi(True) gc.collect() try: text = readSession() if int(text[0]) == 0: print(\"Entering Edit Mode", "menu\") print(\"Some description\") # Reads data about the light state before the restart,", "'SessionData.txt' exceptionMessage = None def reboot(): machine.reset() def help1(): print(\"This is a WIP", "param1, param2): f = open(SESSION_FILENAME, 'w') f.write(str(bootMode)) f.write(str(param1)) f.write(str(param2)) f.close() def getCrashReport(): import", "None def reboot(): machine.reset() def help1(): print(\"This is a WIP help menu\") print(\"Some", "restarting def saveSession(bootMode, param1, param2): f = open(SESSION_FILENAME, 'w') f.write(str(bootMode)) f.write(str(param1)) f.write(str(param2)) f.close()", "= 'SessionData.txt' exceptionMessage = None def reboot(): machine.reset() def help1(): print(\"This is a", "try: text = readSession() if int(text[0]) == 0: print(\"Entering Edit Mode gracefully\") import", "'r') text = f.read() f.close() return text except OSError: print(\"Session File not found\")", "def main(): import gc import connectionManager as cm cm.setWifi(True) gc.collect() try: text =", "saveSession(bootMode, param1, param2): f = open(SESSION_FILENAME, 'w') f.write(str(bootMode)) f.write(str(param1)) f.write(str(param2)) f.close() def getCrashReport():", "reboot(): machine.reset() def help1(): print(\"This is a WIP help menu\") print(\"Some description\") #", "\"00000\" # Saves data 
about the light state before restarting def saveSession(bootMode, param1,", "# Reads data about the light state before the restart, used after restart", "every boot (including wake-boot from deepsleep) import esp import machine esp.osdebug(None) SESSION_FILENAME =", "except OSError: print(\"Session File not found\") return \"00000\" # Saves data about the", "exceptionMessage = exc import triac triac.activate(0) import editMode editMode.main() if __name__ == \"__main__\":", "f.write(str(param2)) f.close() def getCrashReport(): import sys sys.print_exception(exceptionMessage, sys.stderr) # Decide whether to go", "sys.stderr) # Decide whether to go into workMode or editMode def main(): import", "description\") # Reads data about the light state before the restart, used after", "not found\") return \"00000\" # Saves data about the light state before restarting", "Mode\") import workMode workMode.main() machine.reset() except Exception as exc: global exceptionMessage exceptionMessage =", "'w') f.write(str(bootMode)) f.write(str(param1)) f.write(str(param2)) f.close() def getCrashReport(): import sys sys.print_exception(exceptionMessage, sys.stderr) # Decide", "def saveSession(bootMode, param1, param2): f = open(SESSION_FILENAME, 'w') f.write(str(bootMode)) f.write(str(param1)) f.write(str(param2)) f.close() def", "text except OSError: print(\"Session File not found\") return \"00000\" # Saves data about", "f.close() def getCrashReport(): import sys sys.print_exception(exceptionMessage, sys.stderr) # Decide whether to go into", "gracefully\") import editMode editMode.main() else: print(\"Entering Work Mode\") import workMode workMode.main() machine.reset() except", "or editMode def main(): import gc import connectionManager as cm cm.setWifi(True) gc.collect() try:", "f.write(str(bootMode)) f.write(str(param1)) f.write(str(param2)) f.close() def getCrashReport(): import sys sys.print_exception(exceptionMessage, sys.stderr) # Decide whether", "on every boot (including wake-boot from deepsleep) import esp import machine esp.osdebug(None) SESSION_FILENAME", "print(\"This is a WIP help menu\") print(\"Some description\") # Reads data about the", "= exc import triac triac.activate(0) import editMode editMode.main() if __name__ == \"__main__\": main()", "print(\"Entering Work Mode\") import workMode workMode.main() machine.reset() except Exception as exc: global exceptionMessage", "light state before restarting def saveSession(bootMode, param1, param2): f = open(SESSION_FILENAME, 'w') f.write(str(bootMode))", "machine.reset() except Exception as exc: global exceptionMessage exceptionMessage = exc import triac triac.activate(0)", "0: print(\"Entering Edit Mode gracefully\") import editMode editMode.main() else: print(\"Entering Work Mode\") import", "boot (including wake-boot from deepsleep) import esp import machine esp.osdebug(None) SESSION_FILENAME = 'SessionData.txt'", "global exceptionMessage exceptionMessage = exc import triac triac.activate(0) import editMode editMode.main() if __name__", "is executed on every boot (including wake-boot from deepsleep) import esp import machine", "This file is executed on every boot (including wake-boot from deepsleep) import esp", "whether to go into workMode or editMode def main(): import gc import connectionManager", "= readSession() if int(text[0]) == 0: print(\"Entering Edit Mode gracefully\") import editMode editMode.main()", "File not found\") return \"00000\" # Saves data about the light state before", "sys.print_exception(exceptionMessage, sys.stderr) # 
Decide whether to go into workMode or editMode def main():", "to go into workMode or editMode def main(): import gc import connectionManager as", "Exception as exc: global exceptionMessage exceptionMessage = exc import triac triac.activate(0) import editMode", "restart def readSession(): try: f = open(SESSION_FILENAME, 'r') text = f.read() f.close() return", "return \"00000\" # Saves data about the light state before restarting def saveSession(bootMode,", "open(SESSION_FILENAME, 'w') f.write(str(bootMode)) f.write(str(param1)) f.write(str(param2)) f.close() def getCrashReport(): import sys sys.print_exception(exceptionMessage, sys.stderr) #", "into workMode or editMode def main(): import gc import connectionManager as cm cm.setWifi(True)", "Saves data about the light state before restarting def saveSession(bootMode, param1, param2): f", "help menu\") print(\"Some description\") # Reads data about the light state before the", "the light state before restarting def saveSession(bootMode, param1, param2): f = open(SESSION_FILENAME, 'w')", "open(SESSION_FILENAME, 'r') text = f.read() f.close() return text except OSError: print(\"Session File not", "gc import connectionManager as cm cm.setWifi(True) gc.collect() try: text = readSession() if int(text[0])", "esp import machine esp.osdebug(None) SESSION_FILENAME = 'SessionData.txt' exceptionMessage = None def reboot(): machine.reset()", "the light state before the restart, used after restart def readSession(): try: f", "Edit Mode gracefully\") import editMode editMode.main() else: print(\"Entering Work Mode\") import workMode workMode.main()", "= open(SESSION_FILENAME, 'r') text = f.read() f.close() return text except OSError: print(\"Session File", "editMode editMode.main() else: print(\"Entering Work Mode\") import workMode workMode.main() machine.reset() except Exception as", "machine esp.osdebug(None) SESSION_FILENAME = 'SessionData.txt' exceptionMessage = None def reboot(): machine.reset() def help1():", "print(\"Session File not found\") return \"00000\" # Saves data about the light state", "def help1(): print(\"This is a WIP help menu\") print(\"Some description\") # Reads data", "light state before the restart, used after restart def readSession(): try: f =", "param2): f = open(SESSION_FILENAME, 'w') f.write(str(bootMode)) f.write(str(param1)) f.write(str(param2)) f.close() def getCrashReport(): import sys", "# This file is executed on every boot (including wake-boot from deepsleep) import", "f = open(SESSION_FILENAME, 'r') text = f.read() f.close() return text except OSError: print(\"Session", "about the light state before restarting def saveSession(bootMode, param1, param2): f = open(SESSION_FILENAME,", "Decide whether to go into workMode or editMode def main(): import gc import", "cm cm.setWifi(True) gc.collect() try: text = readSession() if int(text[0]) == 0: print(\"Entering Edit", "editMode.main() else: print(\"Entering Work Mode\") import workMode workMode.main() machine.reset() except Exception as exc:", "main(): import gc import connectionManager as cm cm.setWifi(True) gc.collect() try: text = readSession()", "editMode def main(): import gc import connectionManager as cm cm.setWifi(True) gc.collect() try: text", "about the light state before the restart, used after restart def readSession(): try:", "if int(text[0]) == 0: print(\"Entering Edit Mode gracefully\") import editMode editMode.main() else: print(\"Entering", "Mode gracefully\") import editMode editMode.main() else: print(\"Entering Work Mode\") import workMode 
workMode.main() machine.reset()", "data about the light state before restarting def saveSession(bootMode, param1, param2): f =", "SESSION_FILENAME = 'SessionData.txt' exceptionMessage = None def reboot(): machine.reset() def help1(): print(\"This is", "import connectionManager as cm cm.setWifi(True) gc.collect() try: text = readSession() if int(text[0]) ==", "file is executed on every boot (including wake-boot from deepsleep) import esp import", "getCrashReport(): import sys sys.print_exception(exceptionMessage, sys.stderr) # Decide whether to go into workMode or", "import workMode workMode.main() machine.reset() except Exception as exc: global exceptionMessage exceptionMessage = exc", "f.close() return text except OSError: print(\"Session File not found\") return \"00000\" # Saves", "deepsleep) import esp import machine esp.osdebug(None) SESSION_FILENAME = 'SessionData.txt' exceptionMessage = None def", "import gc import connectionManager as cm cm.setWifi(True) gc.collect() try: text = readSession() if", "exceptionMessage exceptionMessage = exc import triac triac.activate(0) import editMode editMode.main() if __name__ ==", "connectionManager as cm cm.setWifi(True) gc.collect() try: text = readSession() if int(text[0]) == 0:", "go into workMode or editMode def main(): import gc import connectionManager as cm", "before the restart, used after restart def readSession(): try: f = open(SESSION_FILENAME, 'r')", "executed on every boot (including wake-boot from deepsleep) import esp import machine esp.osdebug(None)", "# Saves data about the light state before restarting def saveSession(bootMode, param1, param2):", "readSession(): try: f = open(SESSION_FILENAME, 'r') text = f.read() f.close() return text except", "import editMode editMode.main() else: print(\"Entering Work Mode\") import workMode workMode.main() machine.reset() except Exception", "as cm cm.setWifi(True) gc.collect() try: text = readSession() if int(text[0]) == 0: print(\"Entering", "workMode or editMode def main(): import gc import connectionManager as cm cm.setWifi(True) gc.collect()", "used after restart def readSession(): try: f = open(SESSION_FILENAME, 'r') text = f.read()", "before restarting def saveSession(bootMode, param1, param2): f = open(SESSION_FILENAME, 'w') f.write(str(bootMode)) f.write(str(param1)) f.write(str(param2))" ]
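# --- Usage sketch (added; not part of the original file) ---
# The session-file convention implied above: saveSession() concatenates its
# three arguments into one string, and readSession() returns "00000" when the
# file is missing, so text[0] is the boot-mode digit that main() branches on.
# For example (values are illustrative only):
#   saveSession(1, 5, 0)     # SessionData.txt now holds "150"
#   state = readSession()    # -> "150" ("00000" if the file is absent)
#   int(state[0]) == 1       # main() would enter Work Mode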
#!/usr/bin/env python3

from subprocess import run
from sys import argv, exit

PYVER = argv[1]
IMAGE = f"ruterstop:python{PYVER}"

print("Building", IMAGE)
run(
    [
        "docker",
        "build",
        "--network=host",
        "--file=.deploy/Dockerfile",
        f"--build-arg=PYTHON_VERSION={PYVER}",
        f"--build-arg=POETRY_VERSION=1.1.5",
        f"--tag=ruterstop:python{PYVER}",
        ".",
    ],
    check=True,
)

print("Running unit-tests", IMAGE)
run(
    [
        "docker",
        "run",
        "--network=host",
        "--rm",
        IMAGE,
    ]
    + ["unittest"],
    check=True,
)

print("Running livetest", IMAGE)
run(
    [
        "docker",
        "run",
        "--network=host",
        "--rm",
        IMAGE,
    ]
    + ["ruterstop", "--stop-id=6013"],
    check=True,
)

print("Success!")
import bayesian_irl
import mdp_worlds
import utils
import mdp
import numpy as np
import scipy
import random
import generate_efficient_frontier
import matplotlib.pyplot as plt


def generate_reward_sample():
    # rewards for no-op are gamma distributed
    r_noop = []
    locs = 1/2
    scales = [20, 40, 80, 190]
    for i in range(4):
        r_noop.append(-np.random.gamma(locs, scales[i], 1)[0])
    r_noop = np.array(r_noop)
    # rewards for repair are -N(100,1) for all but last state where it is -N(130,20)
    r_repair = -100 + -1 * np.random.randn(4)
    return np.concatenate((r_noop, r_repair))


def generate_posterior_samples(num_samples):
    print("samples")
    all_samples = []
    for i in range(num_samples):
        r_sample = generate_reward_sample()
        all_samples.append(r_sample)
    print("mean of posterior from samples")
    print(np.mean(all_samples, axis=0))
    posterior = np.array(all_samples)
    return posterior.transpose()  # each column is a reward sample


if __name__ == "__main__":
    seed = 1234
    np.random.seed(seed)
    scipy.random.seed(seed)
    random.seed(seed)

    num_states = 4
    num_samples = 2000
    gamma = 0.95
    alpha = 0.99
    lamda = 0.9
    posterior = generate_posterior_samples(num_samples)
    r_sa = np.mean(posterior, axis=1)
    init_distribution = np.ones(num_states)/num_states  # uniform distribution
    mdp_env = mdp.MachineReplacementMDP(num_states, r_sa, gamma, init_distribution)

    print("---MDP solution for expectation---")
    print("mean MDP reward", r_sa)
    u_sa = mdp.solve_mdp_lp(mdp_env, debug=True)
    print("mean policy from posterior")
    utils.print_stochastic_policy_action_probs(u_sa, mdp_env)
    print("MAP/Mean policy from posterior")
    utils.print_policy_from_occupancies(u_sa, mdp_env)
    print("rewards")
    print(mdp_env.r_sa)
    print("expected value = ", np.dot(u_sa, r_sa))
    stoch_pi = utils.get_optimal_policy_from_usa(u_sa, mdp_env)
    print("expected return", mdp.get_policy_expected_return(stoch_pi, mdp_env))
    print("values", mdp.get_state_values(u_sa, mdp_env))
    print('q-values', mdp.get_q_values(u_sa, mdp_env))

    # run CVaR optimization, maybe just the robust version for now
    u_expert = np.zeros(mdp_env.num_actions * mdp_env.num_states)
    # print("solving for CVaR optimal policy")
    posterior_probs = np.ones(num_samples) / num_samples  # uniform dist since samples from MCMC

    # generate efficient frontier
    lambda_range = [0.0, 0.3, 0.5, 0.75, 0.95, 0.99, 1.0]
    # generate_efficient_frontier.calc_frontier(mdp_env, u_expert, posterior, posterior_probs, lambda_range, alpha, debug=False)
    alpha = 0.99
    print("calculating optimal policy for alpha = {} over lambda = {}".format(alpha, lambda_range))
    cvar_rets = generate_efficient_frontier.calc_frontier(mdp_env, u_expert, posterior, posterior_probs, lambda_range, alpha, debug=False)
    cvar_rets_array = np.array(cvar_rets)
    plt.figure()
    plt.plot(cvar_rets_array[:,0], cvar_rets_array[:,1], '-o')

    # go through and label the points in the figure with the corresponding lambda values
    unique_pts_lambdas = []
    unique_pts = []
    for i, pt in enumerate(cvar_rets_array):
        unique = True
        for upt in unique_pts:
            if np.linalg.norm(upt - pt) < 0.00001:
                unique = False
                break
        if unique:
            unique_pts_lambdas.append((pt[0], pt[1], lambda_range[i]))
            unique_pts.append(np.array(pt))

    # calculate offset
    offsetx = (np.max(cvar_rets_array[:,0]) - np.min(cvar_rets_array[:,0]))/30
    offsety = (np.max(cvar_rets_array[:,1]) - np.min(cvar_rets_array[:,1]))/17
    for i, pt in enumerate(unique_pts_lambdas):
        if i in [0, 1, 2, 4]:
            plt.text(pt[0] - 6.2*offsetx, pt[1], r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
        elif i in [3]:
            plt.text(pt[0] - 6.2*offsetx, pt[1] - 1.2*offsety, r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
        elif i in [5]:
            plt.text(pt[0] - 5.5*offsetx, pt[1] - 1.5*offsety, r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
        else:
            plt.text(pt[0]-offsetx, pt[1] - 1.5*offsety, r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
    plt.xticks(fontsize=18)
    plt.yticks(fontsize=18)
    plt.xlabel("Robustness (CVaR)", fontsize=20)
    plt.ylabel("Expected Return", fontsize=20)
    plt.tight_layout()
    plt.savefig('./figs/machine_replacement/efficient_frontier_machine_replacement.png')
    plt.show()
<reponame>khmurakami/pystocktwits_data_utils
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from pystocktwits_data_utils import PyStockTwitData
from pystocktwits_data_utils.utils import return_json_file

data = PyStockTwitData()

recent_msg = data.get_most_recent_sentiment_by_symbol_id('AAPL')
print(recent_msg)

return_json_file(recent_msg, "../sample_json_output/get_most_recent_sentiment_by_symbol.json")
<gh_stars>1-10
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decorator module provides the @accumulator decorator, which offers a
straightforward way of implementing trivial accumulators through a simple
accumulation function. For example:

>>> @Accumulator.immediate()  # Don't forget the parentheses here!
... def count(accumulator_set, value, datum):
...     return value + datum
...

The decorator takes care of the boilerplate code required to produce an
object which implements AccumulatorBase.
"""
import functools

from accumulators.accumulator_base import AccumulatorBase


class ImmediateAccuFromFunc(AccumulatorBase):
    """The ImmediateAccumulatorWrapper takes a naked function and turns it
    into something which implements AccumulatorBase.
    """

    def __init__(self, accumulator_set, starting_value, fn):
        super(ImmediateAccuFromFunc, self).__init__(accumulator_set, starting_value)
        self.fn = fn

    def __call__(self, *args, **kwargs):
        # Dispatch stored attributes to the decorated function.
        self.accu = self.fn(self.accumulator_set, self.accu, *args, **kwargs)


class LazyAccuFromFunc(AccumulatorBase):
    """The LazyAccumulatorWrapper takes a naked function and turns it into
    something which implements AccumulatorBase.
    """

    def __init__(self, accumulator_set, starting_value, fn):
        super(LazyAccuFromFunc, self).__init__(accumulator_set, starting_value)
        self.fn = fn

    def __call__(self, *args, **kwargs):
        pass  # No immediate accumulation

    def value(self):
        # Dispatch stored attributes to the decorated function.
        return self.fn(self.accumulator_set)


class Accumulator(object):
    __slots__ = ()

    @staticmethod
    def immediate(depends_on=[], result_name=None, starting_value=0):
        """Immediate accumulator decorator.

        Args:
            depends_on: list of accumulators on which this value depends
            result_name: name of the shortcut extractor function for the set
            starting_value: starting value for the accumulator (defaults to 0)
        """
        return Accumulator._make_wrapper(ImmediateAccuFromFunc, depends_on, result_name, starting_value)

    @staticmethod
    def lazy(depends_on=[], result_name=None):
        """Lazy accumulator decorator.

        Args:
            depends_on: list of accumulators on which this value depends
            result_name: name of the shortcut extractor function for the set
        """
        return Accumulator._make_wrapper(LazyAccuFromFunc, depends_on, result_name, starting_value=0)

    @staticmethod
    def _make_wrapper(accu_type, depends_on, result_name, starting_value):
        def _wrapper(fn):
            # The wrapped function acts as factory: it produces AccumulatorBase
            # instances, and forwards decorator parameters to the constructor.
            @functools.wraps(fn)
            def wrapped(accumulator_set):
                return accu_type(accumulator_set, starting_value, fn)

            # We need the Accumulating object to declare its dependencies and
            # its value identifier (if necessary). In this case, we attach this
            # information to the wrapped function.
            wrapped.depends_on = depends_on
            wrapped.value_identifier = result_name
            return wrapped
        return _wrapper
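# A hypothetical usage sketch, assuming an accumulator set object that exposes
# declared dependencies as attributes (the set type lives elsewhere in the
# package; `count`, `total` and `mean` are made-up examples, not repo code):

@Accumulator.immediate(result_name='count')
def count(accumulator_set, value, datum):
    return value + 1

@Accumulator.immediate(result_name='total')
def total(accumulator_set, value, datum):
    return value + datum

@Accumulator.lazy(depends_on=[count, total], result_name='mean')
def mean(accumulator_set):
    # Lazy: only computed when the value is read, from its dependencies.
    return accumulator_set.total / accumulator_set.count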
import time
import math


@profile
def primes(n):
    start = time.time()
    prime1 = [2]
    sn = int(math.sqrt(n))
    for attempt in range(3, sn + 1, 2):
        if all((attempt % prime != 0 and n % attempt == 0) for prime in prime1):
            prime1.append(attempt)
    end = time.time()
    print(end - start)
    return prime1


n = primes(600851475143)
print(max(n))
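# Note: the bare @profile above is injected by kernprof (line_profiler) at run
# time, e.g. `kernprof -l primes.py`; run directly with plain `python`, the
# name is undefined and raises NameError. A common guard, placed before the
# decorated function, is a no-op fallback:
try:
    profile
except NameError:
    def profile(func):
        return func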
# Copyright 2021 Condenser Author All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
from dataclasses import dataclass
from typing import List, Dict

import torch
from torch.utils.data import Dataset
from transformers import DataCollatorForWholeWordMask


@dataclass
class CondenserCollator(DataCollatorForWholeWordMask):
    max_seq_length: int = 512

    def __post_init__(self):
        super(CondenserCollator, self).__post_init__()

        from transformers import BertTokenizer, BertTokenizerFast
        from transformers import RobertaTokenizer, RobertaTokenizerFast
        if isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
            self.whole_word_cand_indexes = self._whole_word_cand_indexes_bert
        elif isinstance(self.tokenizer, (RobertaTokenizer, RobertaTokenizerFast)):
            self.whole_word_cand_indexes = self._whole_word_cand_indexes_roberta
        else:
            raise NotImplementedError(f'{type(self.tokenizer)} collator not supported yet')

        self.specials = self.tokenizer.all_special_tokens

    def _whole_word_cand_indexes_bert(self, input_tokens: List[str]):
        cand_indexes = []
        for (i, token) in enumerate(input_tokens):
            if token in self.specials:
                continue
            if len(cand_indexes) >= 1 and token.startswith("##"):
                cand_indexes[-1].append(i)
            else:
                cand_indexes.append([i])
        return cand_indexes

    def _whole_word_cand_indexes_roberta(self, input_tokens: List[str]):
        cand_indexes = []
        for (i, token) in enumerate(input_tokens):
            if token in self.specials:
                raise ValueError('We expect only raw input for roberta for current implementation')
            if i == 0:
                cand_indexes.append([0])
            elif not token.startswith('\u0120'):
                cand_indexes[-1].append(i)
            else:
                cand_indexes.append([i])
        return cand_indexes

    def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
        """
        Get 0/1 labels for masked tokens with whole word mask proxy
        """
        # Use the tokenizer-specific candidate builder chosen in __post_init__;
        # calling _whole_word_cand_indexes_bert directly here would silently
        # bypass the roberta branch.
        cand_indexes = self.whole_word_cand_indexes(input_tokens)
        random.shuffle(cand_indexes)
        num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
        masked_lms = []
        covered_indexes = set()
        for index_set in cand_indexes:
            if len(masked_lms) >= num_to_predict:
                break
            # If adding a whole-word mask would exceed the maximum number of
            # predictions, then just skip this candidate.
            if len(masked_lms) + len(index_set) > num_to_predict:
                continue
            is_any_index_covered = False
            for index in index_set:
                if index in covered_indexes:
                    is_any_index_covered = True
                    break
            if is_any_index_covered:
                continue
            for index in index_set:
                covered_indexes.add(index)
                masked_lms.append(index)

        assert len(covered_indexes) == len(masked_lms)
        mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
        return mask_labels

    def _truncate(self, example: List[int]):
        tgt_len = self.max_seq_length - self.tokenizer.num_special_tokens_to_add(False)
        if len(example) <= tgt_len:
            return example
        trunc = len(example) - tgt_len
        trunc_left = random.randint(0, trunc)
        trunc_right = trunc - trunc_left

        truncated = example[trunc_left:]
        if trunc_right > 0:
            truncated = truncated[:-trunc_right]

        if not len(truncated) == tgt_len:
            print(len(example), len(truncated), trunc_left, trunc_right, tgt_len, flush=True)
            raise ValueError
        return truncated

    def _pad(self, seq, val=0):
        tgt_len = self.max_seq_length
        assert len(seq) <= tgt_len
        return seq + [val for _ in range(tgt_len - len(seq))]

    def __call__(self, examples: List[Dict[str, List[int]]]):
        encoded_examples = []
        masks = []
        mlm_masks = []

        for e in examples:
            e_trunc = self._truncate(e['text'])
            tokens = [self.tokenizer._convert_id_to_token(tid) for tid in e_trunc]
            mlm_mask = self._whole_word_mask(tokens)
            mlm_mask = self._pad([0] + mlm_mask)
            mlm_masks.append(mlm_mask)

            # Reuse e_trunc: _truncate() picks a random window, so truncating
            # e['text'] a second time would desynchronize the whole-word mask
            # from the ids actually encoded.
            encoded = self.tokenizer.encode_plus(
                e_trunc,
                add_special_tokens=True,
                max_length=self.max_seq_length,
                padding="max_length",
                truncation=True,
                return_token_type_ids=False,
            )
            masks.append(encoded['attention_mask'])
            encoded_examples.append(encoded['input_ids'])

        inputs, labels = self.mask_tokens(
            torch.tensor(encoded_examples, dtype=torch.long),
            torch.tensor(mlm_masks, dtype=torch.long)
        )

        batch = {
            "input_ids": inputs,
            "labels": labels,
            "attention_mask": torch.tensor(masks),
        }

        return batch


@dataclass
class CoCondenserCollator(CondenserCollator):
    def __call__(self, examples):
        examples = sum(examples, [])
        examples = [{'text': e} for e in examples]
        return super(CoCondenserCollator, self).__call__(examples)


class CoCondenserDataset(Dataset):
    def __init__(self, dataset, data_args):
        self.dataset = dataset
        self.data_args = data_args

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, item):
        spans = self.dataset[item]['spans']
        return random.sample(spans, 2)
# You may obtain a copy of the License", "mlm_masks.append(mlm_mask) encoded = self.tokenizer.encode_plus( self._truncate(e['text']), add_special_tokens=True, max_length=self.max_seq_length, padding=\"max_length\", truncation=True, return_token_type_ids=False, ) masks.append(encoded['attention_mask']) encoded_examples.append(encoded['input_ids'])", "= data_args def __len__(self): return len(self.dataset) def __getitem__(self, item): spans = self.dataset[item]['spans'] return", "cand_indexes = [] for (i, token) in enumerate(input_tokens): if token in self.specials: raise", "<= tgt_len return seq + [val for _ in range(tgt_len - len(seq))] def", "elif not token.startswith('\\u0120'): cand_indexes[-1].append(i) else: cand_indexes.append([i]) return cand_indexes def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):", "if i == 0: cand_indexes.append([0]) elif not token.startswith('\\u0120'): cand_indexes[-1].append(i) else: cand_indexes.append([i]) return cand_indexes", "labels for masked tokens with whole word mask proxy \"\"\" cand_indexes = self._whole_word_cand_indexes_bert(input_tokens)", "index_set: covered_indexes.add(index) masked_lms.append(index) assert len(covered_indexes) == len(masked_lms) mask_labels = [1 if i in", "only raw input for roberta for current implementation') if i == 0: cand_indexes.append([0])", "for the specific language governing permissions and # limitations under the License. import", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "# predictions, then just skip this candidate. if len(masked_lms) + len(index_set) > num_to_predict:", "in covered_indexes: is_any_index_covered = True break if is_any_index_covered: continue for index in index_set:", "True break if is_any_index_covered: continue for index in index_set: covered_indexes.add(index) masked_lms.append(index) assert len(covered_indexes)", "for (i, token) in enumerate(input_tokens): if token in self.specials: raise ValueError('We expect only", "self.max_seq_length - self.tokenizer.num_special_tokens_to_add(False) if len(example) <= tgt_len: return example trunc = len(example) -", "range(tgt_len - len(seq))] def __call__(self, examples: List[Dict[str, List[int]]]): encoded_examples = [] masks =", "examples] return super(CoCondenserCollator, self).__call__(examples) class CoCondenserDataset(Dataset): def __init__(self, dataset, data_args): self.dataset = dataset", "random.randint(0, trunc) trunc_right = trunc - trunc_left truncated = example[trunc_left:] if trunc_right >", "e_trunc = self._truncate(e['text']) tokens = [self.tokenizer._convert_id_to_token(tid) for tid in e_trunc] mlm_mask = self._whole_word_mask(tokens)", "flush=True) raise ValueError return truncated def _pad(self, seq, val=0): tgt_len = self.max_seq_length assert", "Author All rights reserved. # # Licensed under the Apache License, Version 2.0", "not use this file except in compliance with the License. # You may", "max(1, int(round(len(input_tokens) * self.mlm_probability)))) masked_lms = [] covered_indexes = set() for index_set in", "= [1 if i in covered_indexes else 0 for i in range(len(input_tokens))] return", "governing permissions and # limitations under the License. 
import random from dataclasses import", ">= 1 and token.startswith(\"##\"): cand_indexes[-1].append(i) else: cand_indexes.append([i]) return cand_indexes def _whole_word_cand_indexes_roberta(self, input_tokens: List[str]):", "covered_indexes else 0 for i in range(len(input_tokens))] return mask_labels def _truncate(self, example: List[int]):", "mlm_mask) mlm_masks.append(mlm_mask) encoded = self.tokenizer.encode_plus( self._truncate(e['text']), add_special_tokens=True, max_length=self.max_seq_length, padding=\"max_length\", truncation=True, return_token_type_ids=False, ) masks.append(encoded['attention_mask'])", "token) in enumerate(input_tokens): if token in self.specials: continue if len(cand_indexes) >= 1 and", "License, Version 2.0 (the \"License\"); # you may not use this file except", "covered_indexes.add(index) masked_lms.append(index) assert len(covered_indexes) == len(masked_lms) mask_labels = [1 if i in covered_indexes", "from transformers import BertTokenizer, BertTokenizerFast from transformers import RobertaTokenizer, RobertaTokenizerFast if isinstance(self.tokenizer, (BertTokenizer,", "0: truncated = truncated[:-trunc_right] if not len(truncated) == tgt_len: print(len(example), len(truncated), trunc_left, trunc_right,", "super(CondenserCollator, self).__post_init__() from transformers import BertTokenizer, BertTokenizerFast from transformers import RobertaTokenizer, RobertaTokenizerFast if", "If adding a whole-word mask would exceed the maximum number of # predictions,", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "examples = [{'text': e} for e in examples] return super(CoCondenserCollator, self).__call__(examples) class CoCondenserDataset(Dataset):", "def _whole_word_mask(self, input_tokens: List[str], max_predictions=512): \"\"\" Get 0/1 labels for masked tokens with", "- self.tokenizer.num_special_tokens_to_add(False) if len(example) <= tgt_len: return example trunc = len(example) - tgt_len", "# you may not use this file except in compliance with the License.", "dataclasses import dataclass from typing import List, Dict import torch from torch.utils.data import", "[] masks = [] mlm_masks = [] for e in examples: e_trunc =", "agreed to in writing, software # distributed under the License is distributed on", "= self._whole_word_cand_indexes_bert elif isinstance(self.tokenizer, (RobertaTokenizer, RobertaTokenizerFast)): self.whole_word_cand_indexes = self. _whole_word_cand_indexes_roberta else: raise NotImplementedError(f'{type(self.tokenizer)}", "(the \"License\"); # you may not use this file except in compliance with", "in self.specials: raise ValueError('We expect only raw input for roberta for current implementation')", "cand_indexes.append([i]) return cand_indexes def _whole_word_cand_indexes_roberta(self, input_tokens: List[str]): cand_indexes = [] for (i, token)", "number of # predictions, then just skip this candidate. 
if len(masked_lms) + len(index_set)", "trunc_left, trunc_right, tgt_len, flush=True) raise ValueError return truncated def _pad(self, seq, val=0): tgt_len", "examples: e_trunc = self._truncate(e['text']) tokens = [self.tokenizer._convert_id_to_token(tid) for tid in e_trunc] mlm_mask =", "} return batch @dataclass class CoCondenserCollator(CondenserCollator): def __call__(self, examples): examples = sum(examples, [])", "token.startswith(\"##\"): cand_indexes[-1].append(i) else: cand_indexes.append([i]) return cand_indexes def _whole_word_cand_indexes_roberta(self, input_tokens: List[str]): cand_indexes = []", "= [] for (i, token) in enumerate(input_tokens): if token in self.specials: continue if", "# Unless required by applicable law or agreed to in writing, software #", "by applicable law or agreed to in writing, software # distributed under the", "return example trunc = len(example) - tgt_len trunc_left = random.randint(0, trunc) trunc_right =", "= min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability)))) masked_lms = [] covered_indexes = set() for", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "max_seq_length: int = 512 def __post_init__(self): super(CondenserCollator, self).__post_init__() from transformers import BertTokenizer, BertTokenizerFast", "\"\"\" cand_indexes = self._whole_word_cand_indexes_bert(input_tokens) random.shuffle(cand_indexes) num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability)))) masked_lms", "in covered_indexes else 0 for i in range(len(input_tokens))] return mask_labels def _truncate(self, example:", "= False for index in index_set: if index in covered_indexes: is_any_index_covered = True", ") masks.append(encoded['attention_mask']) encoded_examples.append(encoded['input_ids']) inputs, labels = self.mask_tokens( torch.tensor(encoded_examples, dtype=torch.long), torch.tensor(mlm_masks, dtype=torch.long) ) batch", "len(covered_indexes) == len(masked_lms) mask_labels = [1 if i in covered_indexes else 0 for", "def _pad(self, seq, val=0): tgt_len = self.max_seq_length assert len(seq) <= tgt_len return seq", "tgt_len trunc_left = random.randint(0, trunc) trunc_right = trunc - trunc_left truncated = example[trunc_left:]", "file except in compliance with the License. 
# You may obtain a copy", "continue if len(cand_indexes) >= 1 and token.startswith(\"##\"): cand_indexes[-1].append(i) else: cand_indexes.append([i]) return cand_indexes def", "import random from dataclasses import dataclass from typing import List, Dict import torch", "example[trunc_left:] if trunc_right > 0: truncated = truncated[:-trunc_right] if not len(truncated) == tgt_len:", "License for the specific language governing permissions and # limitations under the License.", "def _truncate(self, example: List[int]): tgt_len = self.max_seq_length - self.tokenizer.num_special_tokens_to_add(False) if len(example) <= tgt_len:", "dataset, data_args): self.dataset = dataset self.data_args = data_args def __len__(self): return len(self.dataset) def", "mlm_mask = self._whole_word_mask(tokens) mlm_mask = self._pad([0] + mlm_mask) mlm_masks.append(mlm_mask) encoded = self.tokenizer.encode_plus( self._truncate(e['text']),", "self.dataset = dataset self.data_args = data_args def __len__(self): return len(self.dataset) def __getitem__(self, item):", "to in writing, software # distributed under the License is distributed on an", "example: List[int]): tgt_len = self.max_seq_length - self.tokenizer.num_special_tokens_to_add(False) if len(example) <= tgt_len: return example", "> 0: truncated = truncated[:-trunc_right] if not len(truncated) == tgt_len: print(len(example), len(truncated), trunc_left,", "enumerate(input_tokens): if token in self.specials: raise ValueError('We expect only raw input for roberta", "implied. # See the License for the specific language governing permissions and #", "self._pad([0] + mlm_mask) mlm_masks.append(mlm_mask) encoded = self.tokenizer.encode_plus( self._truncate(e['text']), add_special_tokens=True, max_length=self.max_seq_length, padding=\"max_length\", truncation=True, return_token_type_ids=False,", "\"License\"); # you may not use this file except in compliance with the", "class CoCondenserDataset(Dataset): def __init__(self, dataset, data_args): self.dataset = dataset self.data_args = data_args def", "= self. _whole_word_cand_indexes_roberta else: raise NotImplementedError(f'{type(self.tokenizer)} collator not supported yet') self.specials = self.tokenizer.all_special_tokens", "= self.max_seq_length - self.tokenizer.num_special_tokens_to_add(False) if len(example) <= tgt_len: return example trunc = len(example)", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "index in index_set: covered_indexes.add(index) masked_lms.append(index) assert len(covered_indexes) == len(masked_lms) mask_labels = [1 if", "roberta for current implementation') if i == 0: cand_indexes.append([0]) elif not token.startswith('\\u0120'): cand_indexes[-1].append(i)", "Dict import torch from torch.utils.data import Dataset from transformers import DataCollatorForWholeWordMask @dataclass class", "tokens with whole word mask proxy \"\"\" cand_indexes = self._whole_word_cand_indexes_bert(input_tokens) random.shuffle(cand_indexes) num_to_predict =", "self).__call__(examples) class CoCondenserDataset(Dataset): def __init__(self, dataset, data_args): self.dataset = dataset self.data_args = data_args", "num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability)))) masked_lms = [] covered_indexes = set()", "masks = [] mlm_masks = [] for e in examples: e_trunc = self._truncate(e['text'])", "specific language governing permissions and # limitations under the License. 
import random from", "List[int]): tgt_len = self.max_seq_length - self.tokenizer.num_special_tokens_to_add(False) if len(example) <= tgt_len: return example trunc", "or implied. # See the License for the specific language governing permissions and", "_pad(self, seq, val=0): tgt_len = self.max_seq_length assert len(seq) <= tgt_len return seq +", "int = 512 def __post_init__(self): super(CondenserCollator, self).__post_init__() from transformers import BertTokenizer, BertTokenizerFast from", "len(example) <= tgt_len: return example trunc = len(example) - tgt_len trunc_left = random.randint(0,", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "if token in self.specials: continue if len(cand_indexes) >= 1 and token.startswith(\"##\"): cand_indexes[-1].append(i) else:", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "= self.max_seq_length assert len(seq) <= tgt_len return seq + [val for _ in", "is_any_index_covered = True break if is_any_index_covered: continue for index in index_set: covered_indexes.add(index) masked_lms.append(index)", "supported yet') self.specials = self.tokenizer.all_special_tokens def _whole_word_cand_indexes_bert(self, input_tokens: List[str]): cand_indexes = [] for", "mlm_mask = self._pad([0] + mlm_mask) mlm_masks.append(mlm_mask) encoded = self.tokenizer.encode_plus( self._truncate(e['text']), add_special_tokens=True, max_length=self.max_seq_length, padding=\"max_length\",", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "(BertTokenizer, BertTokenizerFast)): self.whole_word_cand_indexes = self._whole_word_cand_indexes_bert elif isinstance(self.tokenizer, (RobertaTokenizer, RobertaTokenizerFast)): self.whole_word_cand_indexes = self. _whole_word_cand_indexes_roberta", "1 and token.startswith(\"##\"): cand_indexes[-1].append(i) else: cand_indexes.append([i]) return cand_indexes def _whole_word_cand_indexes_roberta(self, input_tokens: List[str]): cand_indexes", "index_set: if index in covered_indexes: is_any_index_covered = True break if is_any_index_covered: continue for", "in writing, software # distributed under the License is distributed on an \"AS", "self.specials = self.tokenizer.all_special_tokens def _whole_word_cand_indexes_bert(self, input_tokens: List[str]): cand_indexes = [] for (i, token)", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "DataCollatorForWholeWordMask @dataclass class CondenserCollator(DataCollatorForWholeWordMask): max_seq_length: int = 512 def __post_init__(self): super(CondenserCollator, self).__post_init__() from", "the specific language governing permissions and # limitations under the License. import random", "RobertaTokenizerFast)): self.whole_word_cand_indexes = self. 
_whole_word_cand_indexes_roberta else: raise NotImplementedError(f'{type(self.tokenizer)} collator not supported yet') self.specials", "data_args): self.dataset = dataset self.data_args = data_args def __len__(self): return len(self.dataset) def __getitem__(self,", "= True break if is_any_index_covered: continue for index in index_set: covered_indexes.add(index) masked_lms.append(index) assert", "[] for (i, token) in enumerate(input_tokens): if token in self.specials: continue if len(cand_indexes)", "in examples] return super(CoCondenserCollator, self).__call__(examples) class CoCondenserDataset(Dataset): def __init__(self, dataset, data_args): self.dataset =", "adding a whole-word mask would exceed the maximum number of # predictions, then", "if len(masked_lms) >= num_to_predict: break # If adding a whole-word mask would exceed", "add_special_tokens=True, max_length=self.max_seq_length, padding=\"max_length\", truncation=True, return_token_type_ids=False, ) masks.append(encoded['attention_mask']) encoded_examples.append(encoded['input_ids']) inputs, labels = self.mask_tokens( torch.tensor(encoded_examples,", "trunc) trunc_right = trunc - trunc_left truncated = example[trunc_left:] if trunc_right > 0:", "e in examples] return super(CoCondenserCollator, self).__call__(examples) class CoCondenserDataset(Dataset): def __init__(self, dataset, data_args): self.dataset", "set() for index_set in cand_indexes: if len(masked_lms) >= num_to_predict: break # If adding", "- tgt_len trunc_left = random.randint(0, trunc) trunc_right = trunc - trunc_left truncated =", "token in self.specials: continue if len(cand_indexes) >= 1 and token.startswith(\"##\"): cand_indexes[-1].append(i) else: cand_indexes.append([i])", "= { \"input_ids\": inputs, \"labels\": labels, \"attention_mask\": torch.tensor(masks), } return batch @dataclass class", "CoCondenserDataset(Dataset): def __init__(self, dataset, data_args): self.dataset = dataset self.data_args = data_args def __len__(self):", "0: cand_indexes.append([0]) elif not token.startswith('\\u0120'): cand_indexes[-1].append(i) else: cand_indexes.append([i]) return cand_indexes def _whole_word_mask(self, input_tokens:", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "from dataclasses import dataclass from typing import List, Dict import torch from torch.utils.data", "= [] mlm_masks = [] for e in examples: e_trunc = self._truncate(e['text']) tokens", "you may not use this file except in compliance with the License. 
#", "torch.tensor(masks), } return batch @dataclass class CoCondenserCollator(CondenserCollator): def __call__(self, examples): examples = sum(examples,", "<= tgt_len: return example trunc = len(example) - tgt_len trunc_left = random.randint(0, trunc)", "with whole word mask proxy \"\"\" cand_indexes = self._whole_word_cand_indexes_bert(input_tokens) random.shuffle(cand_indexes) num_to_predict = min(max_predictions,", "e} for e in examples] return super(CoCondenserCollator, self).__call__(examples) class CoCondenserDataset(Dataset): def __init__(self, dataset,", "encoded_examples.append(encoded['input_ids']) inputs, labels = self.mask_tokens( torch.tensor(encoded_examples, dtype=torch.long), torch.tensor(mlm_masks, dtype=torch.long) ) batch = {", "__call__(self, examples: List[Dict[str, List[int]]]): encoded_examples = [] masks = [] mlm_masks = []", "labels = self.mask_tokens( torch.tensor(encoded_examples, dtype=torch.long), torch.tensor(mlm_masks, dtype=torch.long) ) batch = { \"input_ids\": inputs,", "(i, token) in enumerate(input_tokens): if token in self.specials: continue if len(cand_indexes) >= 1", "mask would exceed the maximum number of # predictions, then just skip this", "= [] covered_indexes = set() for index_set in cand_indexes: if len(masked_lms) >= num_to_predict:", "from transformers import RobertaTokenizer, RobertaTokenizerFast if isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)): self.whole_word_cand_indexes = self._whole_word_cand_indexes_bert elif", "cand_indexes = self._whole_word_cand_indexes_bert(input_tokens) random.shuffle(cand_indexes) num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability)))) masked_lms =", "- trunc_left truncated = example[trunc_left:] if trunc_right > 0: truncated = truncated[:-trunc_right] if", "use this file except in compliance with the License. # You may obtain", "isinstance(self.tokenizer, (RobertaTokenizer, RobertaTokenizerFast)): self.whole_word_cand_indexes = self. 
_whole_word_cand_indexes_roberta else: raise NotImplementedError(f'{type(self.tokenizer)} collator not supported", "break # If adding a whole-word mask would exceed the maximum number of", "in index_set: if index in covered_indexes: is_any_index_covered = True break if is_any_index_covered: continue", "return mask_labels def _truncate(self, example: List[int]): tgt_len = self.max_seq_length - self.tokenizer.num_special_tokens_to_add(False) if len(example)", "trunc = len(example) - tgt_len trunc_left = random.randint(0, trunc) trunc_right = trunc -", "ValueError return truncated def _pad(self, seq, val=0): tgt_len = self.max_seq_length assert len(seq) <=", "seq + [val for _ in range(tgt_len - len(seq))] def __call__(self, examples: List[Dict[str,", "RobertaTokenizer, RobertaTokenizerFast if isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)): self.whole_word_cand_indexes = self._whole_word_cand_indexes_bert elif isinstance(self.tokenizer, (RobertaTokenizer, RobertaTokenizerFast)):", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "if token in self.specials: raise ValueError('We expect only raw input for roberta for", "= [self.tokenizer._convert_id_to_token(tid) for tid in e_trunc] mlm_mask = self._whole_word_mask(tokens) mlm_mask = self._pad([0] +", "input_tokens: List[str], max_predictions=512): \"\"\" Get 0/1 labels for masked tokens with whole word", "in self.specials: continue if len(cand_indexes) >= 1 and token.startswith(\"##\"): cand_indexes[-1].append(i) else: cand_indexes.append([i]) return", "# Copyright 2021 Condenser Author All rights reserved. # # Licensed under the", "in e_trunc] mlm_mask = self._whole_word_mask(tokens) mlm_mask = self._pad([0] + mlm_mask) mlm_masks.append(mlm_mask) encoded =", "__post_init__(self): super(CondenserCollator, self).__post_init__() from transformers import BertTokenizer, BertTokenizerFast from transformers import RobertaTokenizer, RobertaTokenizerFast", "break if is_any_index_covered: continue for index in index_set: covered_indexes.add(index) masked_lms.append(index) assert len(covered_indexes) ==", "len(masked_lms) + len(index_set) > num_to_predict: continue is_any_index_covered = False for index in index_set:", "(RobertaTokenizer, RobertaTokenizerFast)): self.whole_word_cand_indexes = self. _whole_word_cand_indexes_roberta else: raise NotImplementedError(f'{type(self.tokenizer)} collator not supported yet')", "= example[trunc_left:] if trunc_right > 0: truncated = truncated[:-trunc_right] if not len(truncated) ==", "enumerate(input_tokens): if token in self.specials: continue if len(cand_indexes) >= 1 and token.startswith(\"##\"): cand_indexes[-1].append(i)", "2.0 (the \"License\"); # you may not use this file except in compliance", "index in index_set: if index in covered_indexes: is_any_index_covered = True break if is_any_index_covered:", "else: raise NotImplementedError(f'{type(self.tokenizer)} collator not supported yet') self.specials = self.tokenizer.all_special_tokens def _whole_word_cand_indexes_bert(self, input_tokens:", "_whole_word_cand_indexes_bert(self, input_tokens: List[str]): cand_indexes = [] for (i, token) in enumerate(input_tokens): if token", "exceed the maximum number of # predictions, then just skip this candidate. 
if", "self.data_args = data_args def __len__(self): return len(self.dataset) def __getitem__(self, item): spans = self.dataset[item]['spans']", "example trunc = len(example) - tgt_len trunc_left = random.randint(0, trunc) trunc_right = trunc", "just skip this candidate. if len(masked_lms) + len(index_set) > num_to_predict: continue is_any_index_covered =", "[1 if i in covered_indexes else 0 for i in range(len(input_tokens))] return mask_labels", "is_any_index_covered: continue for index in index_set: covered_indexes.add(index) masked_lms.append(index) assert len(covered_indexes) == len(masked_lms) mask_labels", "e in examples: e_trunc = self._truncate(e['text']) tokens = [self.tokenizer._convert_id_to_token(tid) for tid in e_trunc]", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "masked_lms = [] covered_indexes = set() for index_set in cand_indexes: if len(masked_lms) >=", "_truncate(self, example: List[int]): tgt_len = self.max_seq_length - self.tokenizer.num_special_tokens_to_add(False) if len(example) <= tgt_len: return", "_whole_word_mask(self, input_tokens: List[str], max_predictions=512): \"\"\" Get 0/1 labels for masked tokens with whole", "# # Unless required by applicable law or agreed to in writing, software", "# limitations under the License. import random from dataclasses import dataclass from typing", "express or implied. # See the License for the specific language governing permissions", "== len(masked_lms) mask_labels = [1 if i in covered_indexes else 0 for i", "print(len(example), len(truncated), trunc_left, trunc_right, tgt_len, flush=True) raise ValueError return truncated def _pad(self, seq,", "expect only raw input for roberta for current implementation') if i == 0:", "import RobertaTokenizer, RobertaTokenizerFast if isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)): self.whole_word_cand_indexes = self._whole_word_cand_indexes_bert elif isinstance(self.tokenizer, (RobertaTokenizer,", "[] for (i, token) in enumerate(input_tokens): if token in self.specials: raise ValueError('We expect", "in range(len(input_tokens))] return mask_labels def _truncate(self, example: List[int]): tgt_len = self.max_seq_length - self.tokenizer.num_special_tokens_to_add(False)", "self.whole_word_cand_indexes = self. _whole_word_cand_indexes_roberta else: raise NotImplementedError(f'{type(self.tokenizer)} collator not supported yet') self.specials =", "import torch from torch.utils.data import Dataset from transformers import DataCollatorForWholeWordMask @dataclass class CondenserCollator(DataCollatorForWholeWordMask):", "isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)): self.whole_word_cand_indexes = self._whole_word_cand_indexes_bert elif isinstance(self.tokenizer, (RobertaTokenizer, RobertaTokenizerFast)): self.whole_word_cand_indexes = self.", "either express or implied. # See the License for the specific language governing", "proxy \"\"\" cand_indexes = self._whole_word_cand_indexes_bert(input_tokens) random.shuffle(cand_indexes) num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))", "0 for i in range(len(input_tokens))] return mask_labels def _truncate(self, example: List[int]): tgt_len =", "then just skip this candidate. 
if len(masked_lms) + len(index_set) > num_to_predict: continue is_any_index_covered", "= self._pad([0] + mlm_mask) mlm_masks.append(mlm_mask) encoded = self.tokenizer.encode_plus( self._truncate(e['text']), add_special_tokens=True, max_length=self.max_seq_length, padding=\"max_length\", truncation=True,", "return super(CoCondenserCollator, self).__call__(examples) class CoCondenserDataset(Dataset): def __init__(self, dataset, data_args): self.dataset = dataset self.data_args", "self._whole_word_cand_indexes_bert elif isinstance(self.tokenizer, (RobertaTokenizer, RobertaTokenizerFast)): self.whole_word_cand_indexes = self. _whole_word_cand_indexes_roberta else: raise NotImplementedError(f'{type(self.tokenizer)} collator", "len(example) - tgt_len trunc_left = random.randint(0, trunc) trunc_right = trunc - trunc_left truncated", "i in covered_indexes else 0 for i in range(len(input_tokens))] return mask_labels def _truncate(self,", "len(truncated), trunc_left, trunc_right, tgt_len, flush=True) raise ValueError return truncated def _pad(self, seq, val=0):", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "examples = sum(examples, []) examples = [{'text': e} for e in examples] return", "= set() for index_set in cand_indexes: if len(masked_lms) >= num_to_predict: break # If", "cand_indexes: if len(masked_lms) >= num_to_predict: break # If adding a whole-word mask would", "= self.tokenizer.all_special_tokens def _whole_word_cand_indexes_bert(self, input_tokens: List[str]): cand_indexes = [] for (i, token) in", "List[str], max_predictions=512): \"\"\" Get 0/1 labels for masked tokens with whole word mask", "not token.startswith('\\u0120'): cand_indexes[-1].append(i) else: cand_indexes.append([i]) return cand_indexes def _whole_word_mask(self, input_tokens: List[str], max_predictions=512): \"\"\"", "whole word mask proxy \"\"\" cand_indexes = self._whole_word_cand_indexes_bert(input_tokens) random.shuffle(cand_indexes) num_to_predict = min(max_predictions, max(1,", "torch.tensor(mlm_masks, dtype=torch.long) ) batch = { \"input_ids\": inputs, \"labels\": labels, \"attention_mask\": torch.tensor(masks), }", "under the License. import random from dataclasses import dataclass from typing import List,", "the License. # You may obtain a copy of the License at #", "raise ValueError return truncated def _pad(self, seq, val=0): tgt_len = self.max_seq_length assert len(seq)", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "typing import List, Dict import torch from torch.utils.data import Dataset from transformers import", "len(seq))] def __call__(self, examples: List[Dict[str, List[int]]]): encoded_examples = [] masks = [] mlm_masks", "= [{'text': e} for e in examples] return super(CoCondenserCollator, self).__call__(examples) class CoCondenserDataset(Dataset): def", "torch.utils.data import Dataset from transformers import DataCollatorForWholeWordMask @dataclass class CondenserCollator(DataCollatorForWholeWordMask): max_seq_length: int =", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "\"labels\": labels, \"attention_mask\": torch.tensor(masks), } return batch @dataclass class CoCondenserCollator(CondenserCollator): def __call__(self, examples):", "Condenser Author All rights reserved. 
# # Licensed under the Apache License, Version", "= sum(examples, []) examples = [{'text': e} for e in examples] return super(CoCondenserCollator,", "max_predictions=512): \"\"\" Get 0/1 labels for masked tokens with whole word mask proxy", "mask_labels def _truncate(self, example: List[int]): tgt_len = self.max_seq_length - self.tokenizer.num_special_tokens_to_add(False) if len(example) <=", "raise NotImplementedError(f'{type(self.tokenizer)} collator not supported yet') self.specials = self.tokenizer.all_special_tokens def _whole_word_cand_indexes_bert(self, input_tokens: List[str]):", "assert len(seq) <= tgt_len return seq + [val for _ in range(tgt_len -", "for (i, token) in enumerate(input_tokens): if token in self.specials: continue if len(cand_indexes) >=", "examples: List[Dict[str, List[int]]]): encoded_examples = [] masks = [] mlm_masks = [] for", "the License. import random from dataclasses import dataclass from typing import List, Dict", "trunc - trunc_left truncated = example[trunc_left:] if trunc_right > 0: truncated = truncated[:-trunc_right]", "+ mlm_mask) mlm_masks.append(mlm_mask) encoded = self.tokenizer.encode_plus( self._truncate(e['text']), add_special_tokens=True, max_length=self.max_seq_length, padding=\"max_length\", truncation=True, return_token_type_ids=False, )", "self._truncate(e['text']) tokens = [self.tokenizer._convert_id_to_token(tid) for tid in e_trunc] mlm_mask = self._whole_word_mask(tokens) mlm_mask =", "with the License. # You may obtain a copy of the License at", "if index in covered_indexes: is_any_index_covered = True break if is_any_index_covered: continue for index", "cand_indexes def _whole_word_mask(self, input_tokens: List[str], max_predictions=512): \"\"\" Get 0/1 labels for masked tokens", "= self.tokenizer.encode_plus( self._truncate(e['text']), add_special_tokens=True, max_length=self.max_seq_length, padding=\"max_length\", truncation=True, return_token_type_ids=False, ) masks.append(encoded['attention_mask']) encoded_examples.append(encoded['input_ids']) inputs, labels", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "cand_indexes.append([0]) elif not token.startswith('\\u0120'): cand_indexes[-1].append(i) else: cand_indexes.append([i]) return cand_indexes def _whole_word_mask(self, input_tokens: List[str],", "import dataclass from typing import List, Dict import torch from torch.utils.data import Dataset", "language governing permissions and # limitations under the License. import random from dataclasses", "if len(masked_lms) + len(index_set) > num_to_predict: continue is_any_index_covered = False for index in", "rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the \"License\");", "seq, val=0): tgt_len = self.max_seq_length assert len(seq) <= tgt_len return seq + [val", "dataset self.data_args = data_args def __len__(self): return len(self.dataset) def __getitem__(self, item): spans =", "_whole_word_cand_indexes_roberta(self, input_tokens: List[str]): cand_indexes = [] for (i, token) in enumerate(input_tokens): if token", "i in range(len(input_tokens))] return mask_labels def _truncate(self, example: List[int]): tgt_len = self.max_seq_length -", "for index in index_set: covered_indexes.add(index) masked_lms.append(index) assert len(covered_indexes) == len(masked_lms) mask_labels = [1", "law or agreed to in writing, software # distributed under the License is", "is_any_index_covered = False for index in index_set: if index in covered_indexes: is_any_index_covered =", "the License for the specific language governing permissions and # limitations under the", "Copyright 2021 Condenser Author All rights reserved. # # Licensed under the Apache", "for roberta for current implementation') if i == 0: cand_indexes.append([0]) elif not token.startswith('\\u0120'):", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "super(CoCondenserCollator, self).__call__(examples) class CoCondenserDataset(Dataset): def __init__(self, dataset, data_args): self.dataset = dataset self.data_args =", "len(seq) <= tgt_len return seq + [val for _ in range(tgt_len - len(seq))]", "(i, token) in enumerate(input_tokens): if token in self.specials: raise ValueError('We expect only raw", "0/1 labels for masked tokens with whole word mask proxy \"\"\" cand_indexes =", "yet') self.specials = self.tokenizer.all_special_tokens def _whole_word_cand_indexes_bert(self, input_tokens: List[str]): cand_indexes = [] for (i,", "CondenserCollator(DataCollatorForWholeWordMask): max_seq_length: int = 512 def __post_init__(self): super(CondenserCollator, self).__post_init__() from transformers import BertTokenizer,", "from transformers import DataCollatorForWholeWordMask @dataclass class CondenserCollator(DataCollatorForWholeWordMask): max_seq_length: int = 512 def __post_init__(self):", "truncation=True, return_token_type_ids=False, ) masks.append(encoded['attention_mask']) encoded_examples.append(encoded['input_ids']) inputs, labels = self.mask_tokens( torch.tensor(encoded_examples, dtype=torch.long), torch.tensor(mlm_masks, dtype=torch.long)", "[val for _ in range(tgt_len - len(seq))] def __call__(self, examples: List[Dict[str, List[int]]]): encoded_examples", "and # limitations under the License. import random from dataclasses import dataclass from", "would exceed the maximum number of # predictions, then just skip this candidate.", "= [] for (i, token) in enumerate(input_tokens): if token in self.specials: raise ValueError('We", "masks.append(encoded['attention_mask']) encoded_examples.append(encoded['input_ids']) inputs, labels = self.mask_tokens( torch.tensor(encoded_examples, dtype=torch.long), torch.tensor(mlm_masks, dtype=torch.long) ) batch =", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "BertTokenizerFast)): self.whole_word_cand_indexes = self._whole_word_cand_indexes_bert elif isinstance(self.tokenizer, (RobertaTokenizer, RobertaTokenizerFast)): self.whole_word_cand_indexes = self. _whole_word_cand_indexes_roberta else:", "maximum number of # predictions, then just skip this candidate. 
if len(masked_lms) +", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "\"input_ids\": inputs, \"labels\": labels, \"attention_mask\": torch.tensor(masks), } return batch @dataclass class CoCondenserCollator(CondenserCollator): def", "a whole-word mask would exceed the maximum number of # predictions, then just", "else 0 for i in range(len(input_tokens))] return mask_labels def _truncate(self, example: List[int]): tgt_len", "dtype=torch.long), torch.tensor(mlm_masks, dtype=torch.long) ) batch = { \"input_ids\": inputs, \"labels\": labels, \"attention_mask\": torch.tensor(masks),", ">= num_to_predict: break # If adding a whole-word mask would exceed the maximum", "List[Dict[str, List[int]]]): encoded_examples = [] masks = [] mlm_masks = [] for e", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "and token.startswith(\"##\"): cand_indexes[-1].append(i) else: cand_indexes.append([i]) return cand_indexes def _whole_word_cand_indexes_roberta(self, input_tokens: List[str]): cand_indexes =", "return_token_type_ids=False, ) masks.append(encoded['attention_mask']) encoded_examples.append(encoded['input_ids']) inputs, labels = self.mask_tokens( torch.tensor(encoded_examples, dtype=torch.long), torch.tensor(mlm_masks, dtype=torch.long) )", "= truncated[:-trunc_right] if not len(truncated) == tgt_len: print(len(example), len(truncated), trunc_left, trunc_right, tgt_len, flush=True)", "[self.tokenizer._convert_id_to_token(tid) for tid in e_trunc] mlm_mask = self._whole_word_mask(tokens) mlm_mask = self._pad([0] + mlm_mask)", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "self.specials: continue if len(cand_indexes) >= 1 and token.startswith(\"##\"): cand_indexes[-1].append(i) else: cand_indexes.append([i]) return cand_indexes", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "the maximum number of # predictions, then just skip this candidate. if len(masked_lms)", "self.mask_tokens( torch.tensor(encoded_examples, dtype=torch.long), torch.tensor(mlm_masks, dtype=torch.long) ) batch = { \"input_ids\": inputs, \"labels\": labels,", "truncated[:-trunc_right] if not len(truncated) == tgt_len: print(len(example), len(truncated), trunc_left, trunc_right, tgt_len, flush=True) raise", "BertTokenizerFast from transformers import RobertaTokenizer, RobertaTokenizerFast if isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)): self.whole_word_cand_indexes = self._whole_word_cand_indexes_bert", "tgt_len = self.max_seq_length assert len(seq) <= tgt_len return seq + [val for _", "random.shuffle(cand_indexes) num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability)))) masked_lms = [] covered_indexes =", "trunc_left truncated = example[trunc_left:] if trunc_right > 0: truncated = truncated[:-trunc_right] if not", "trunc_right > 0: truncated = truncated[:-trunc_right] if not len(truncated) == tgt_len: print(len(example), len(truncated),", "self. 
_whole_word_cand_indexes_roberta else: raise NotImplementedError(f'{type(self.tokenizer)} collator not supported yet') self.specials = self.tokenizer.all_special_tokens def", "token in self.specials: raise ValueError('We expect only raw input for roberta for current", "NotImplementedError(f'{type(self.tokenizer)} collator not supported yet') self.specials = self.tokenizer.all_special_tokens def _whole_word_cand_indexes_bert(self, input_tokens: List[str]): cand_indexes", "skip this candidate. if len(masked_lms) + len(index_set) > num_to_predict: continue is_any_index_covered = False", "\"attention_mask\": torch.tensor(masks), } return batch @dataclass class CoCondenserCollator(CondenserCollator): def __call__(self, examples): examples =", "if i in covered_indexes else 0 for i in range(len(input_tokens))] return mask_labels def", "batch @dataclass class CoCondenserCollator(CondenserCollator): def __call__(self, examples): examples = sum(examples, []) examples =", "[] for e in examples: e_trunc = self._truncate(e['text']) tokens = [self.tokenizer._convert_id_to_token(tid) for tid", "trunc_right, tgt_len, flush=True) raise ValueError return truncated def _pad(self, seq, val=0): tgt_len =", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "collator not supported yet') self.specials = self.tokenizer.all_special_tokens def _whole_word_cand_indexes_bert(self, input_tokens: List[str]): cand_indexes =", "trunc_left = random.randint(0, trunc) trunc_right = trunc - trunc_left truncated = example[trunc_left:] if", "mask proxy \"\"\" cand_indexes = self._whole_word_cand_indexes_bert(input_tokens) random.shuffle(cand_indexes) num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) *", "+ len(index_set) > num_to_predict: continue is_any_index_covered = False for index in index_set: if", "* self.mlm_probability)))) masked_lms = [] covered_indexes = set() for index_set in cand_indexes: if", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "from typing import List, Dict import torch from torch.utils.data import Dataset from transformers", "self._truncate(e['text']), add_special_tokens=True, max_length=self.max_seq_length, padding=\"max_length\", truncation=True, return_token_type_ids=False, ) masks.append(encoded['attention_mask']) encoded_examples.append(encoded['input_ids']) inputs, labels = self.mask_tokens(", "cand_indexes[-1].append(i) else: cand_indexes.append([i]) return cand_indexes def _whole_word_mask(self, input_tokens: List[str], max_predictions=512): \"\"\" Get 0/1", "def __len__(self): return len(self.dataset) def __getitem__(self, item): spans = self.dataset[item]['spans'] return random.sample(spans, 2)", "not len(truncated) == tgt_len: print(len(example), len(truncated), trunc_left, trunc_right, tgt_len, flush=True) raise ValueError return", "self.specials: raise ValueError('We expect only raw input for roberta for current implementation') if", "{ \"input_ids\": inputs, \"labels\": labels, \"attention_mask\": torch.tensor(masks), } return batch @dataclass class CoCondenserCollator(CondenserCollator):", "tgt_len: print(len(example), len(truncated), trunc_left, trunc_right, tgt_len, flush=True) raise ValueError return truncated def _pad(self,", "CoCondenserCollator(CondenserCollator): def __call__(self, examples): examples = sum(examples, []) examples = [{'text': e} for", "for i in range(len(input_tokens))] return mask_labels def _truncate(self, example: List[int]): tgt_len = self.max_seq_length", "return cand_indexes def _whole_word_mask(self, input_tokens: List[str], max_predictions=512): \"\"\" Get 0/1 labels for masked", "num_to_predict: break # If adding a whole-word mask would exceed the maximum number", "__call__(self, examples): examples = sum(examples, []) examples = [{'text': e} for e in", "else: cand_indexes.append([i]) return cand_indexes def _whole_word_cand_indexes_roberta(self, input_tokens: List[str]): cand_indexes = [] for (i,", "License. 
import random from dataclasses import dataclass from typing import List, Dict import", "for index in index_set: if index in covered_indexes: is_any_index_covered = True break if", "def __post_init__(self): super(CondenserCollator, self).__post_init__() from transformers import BertTokenizer, BertTokenizerFast from transformers import RobertaTokenizer,", "implementation') if i == 0: cand_indexes.append([0]) elif not token.startswith('\\u0120'): cand_indexes[-1].append(i) else: cand_indexes.append([i]) return", "= random.randint(0, trunc) trunc_right = trunc - trunc_left truncated = example[trunc_left:] if trunc_right", "for e in examples] return super(CoCondenserCollator, self).__call__(examples) class CoCondenserDataset(Dataset): def __init__(self, dataset, data_args):", "self.mlm_probability)))) masked_lms = [] covered_indexes = set() for index_set in cand_indexes: if len(masked_lms)", "for tid in e_trunc] mlm_mask = self._whole_word_mask(tokens) mlm_mask = self._pad([0] + mlm_mask) mlm_masks.append(mlm_mask)", "if trunc_right > 0: truncated = truncated[:-trunc_right] if not len(truncated) == tgt_len: print(len(example),", "= [] for e in examples: e_trunc = self._truncate(e['text']) tokens = [self.tokenizer._convert_id_to_token(tid) for", "List[str]): cand_indexes = [] for (i, token) in enumerate(input_tokens): if token in self.specials:", "import List, Dict import torch from torch.utils.data import Dataset from transformers import DataCollatorForWholeWordMask", "= [] masks = [] mlm_masks = [] for e in examples: e_trunc", "tgt_len: return example trunc = len(example) - tgt_len trunc_left = random.randint(0, trunc) trunc_right", "= dataset self.data_args = data_args def __len__(self): return len(self.dataset) def __getitem__(self, item): spans", "input for roberta for current implementation') if i == 0: cand_indexes.append([0]) elif not", "transformers import BertTokenizer, BertTokenizerFast from transformers import RobertaTokenizer, RobertaTokenizerFast if isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):", "of # predictions, then just skip this candidate. 
if len(masked_lms) + len(index_set) >", "self).__post_init__() from transformers import BertTokenizer, BertTokenizerFast from transformers import RobertaTokenizer, RobertaTokenizerFast if isinstance(self.tokenizer,", "masked_lms.append(index) assert len(covered_indexes) == len(masked_lms) mask_labels = [1 if i in covered_indexes else", "truncated def _pad(self, seq, val=0): tgt_len = self.max_seq_length assert len(seq) <= tgt_len return", "== tgt_len: print(len(example), len(truncated), trunc_left, trunc_right, tgt_len, flush=True) raise ValueError return truncated def", "from torch.utils.data import Dataset from transformers import DataCollatorForWholeWordMask @dataclass class CondenserCollator(DataCollatorForWholeWordMask): max_seq_length: int", "if isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)): self.whole_word_cand_indexes = self._whole_word_cand_indexes_bert elif isinstance(self.tokenizer, (RobertaTokenizer, RobertaTokenizerFast)): self.whole_word_cand_indexes =", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", ") batch = { \"input_ids\": inputs, \"labels\": labels, \"attention_mask\": torch.tensor(masks), } return batch", "encoded_examples = [] masks = [] mlm_masks = [] for e in examples:", "transformers import DataCollatorForWholeWordMask @dataclass class CondenserCollator(DataCollatorForWholeWordMask): max_seq_length: int = 512 def __post_init__(self): super(CondenserCollator,", "inputs, \"labels\": labels, \"attention_mask\": torch.tensor(masks), } return batch @dataclass class CoCondenserCollator(CondenserCollator): def __call__(self,", "cand_indexes = [] for (i, token) in enumerate(input_tokens): if token in self.specials: continue", "for index_set in cand_indexes: if len(masked_lms) >= num_to_predict: break # If adding a", "\"\"\" Get 0/1 labels for masked tokens with whole word mask proxy \"\"\"" ]
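# A plausible end-to-end wiring of the pieces above with a standard PyTorch
# DataLoader. This is a sketch, not part of the original file: the toy corpus,
# the model name, and the hyperparameters are illustrative assumptions.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    from transformers import BertTokenizerFast

    tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')

    # Hypothetical pre-tokenized corpus: each record holds the token-id lists
    # of several spans from one document, as CoCondenserDataset expects.
    corpus = [{'spans': [tokenizer.encode(t, add_special_tokens=False)
                         for t in ('first span', 'second span', 'third span')]}]

    dataset = CoCondenserDataset(corpus, data_args=None)
    collator = CoCondenserCollator(
        tokenizer=tokenizer, mlm_probability=0.15, max_seq_length=128)
    loader = DataLoader(dataset, batch_size=1, collate_fn=collator)

    # Each dataset item yields 2 spans, so a batch has batch_size * 2 rows.
    # Note: self.mask_tokens(...) above exists on DataCollatorForWholeWordMask
    # only in older transformers releases (newer ones renamed it
    # torch_mask_tokens), so this sketch assumes a matching older version.
    batch = next(iter(loader))
    print(batch['input_ids'].shape)  # torch.Size([2, 128])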
[ "rest_framework.exceptions import APIException from core.exceptions import common_exception_handler def test_common_exception_handler_if_error_without_detail(mocker): exp = APIException({'data': 'test'})", "def test_common_exception_handler_if_error_is_string(mocker): exp = APIException(['testing error']) response = common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] ==", "exp = APIException(['testing error']) response = common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:' assert", "assert response.data['error_name'] == 'APIException' assert response.data['detail'] == {'data': 'test'} def test_common_exception_handler_if_error_is_string(mocker): exp =", "def test_common_exception_handler_if_error_without_detail(mocker): exp = APIException({'data': 'test'}) response = common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] ==", "test_common_exception_handler_if_error_is_string(mocker): exp = APIException(['testing error']) response = common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:'", "'unittest.mock.Mock:' assert response.data['error_name'] == 'APIException' assert response.data['detail'] == {'data': 'test'} def test_common_exception_handler_if_error_is_string(mocker): exp", "mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:' assert response.data['error_name'] == 'APIException' assert response.data['detail'] == ['testing", "{'data': 'test'} def test_common_exception_handler_if_error_is_string(mocker): exp = APIException(['testing error']) response = common_exception_handler(exp, mocker.Mock()) assert", "'test'}) response = common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:' assert response.data['error_name'] == 'APIException'", "common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:' assert response.data['error_name'] == 'APIException' assert response.data['detail'] ==", "from rest_framework.exceptions import APIException from core.exceptions import common_exception_handler def test_common_exception_handler_if_error_without_detail(mocker): exp = APIException({'data':", "'APIException' assert response.data['detail'] == {'data': 'test'} def test_common_exception_handler_if_error_is_string(mocker): exp = APIException(['testing error']) response", "assert response.data['service_name'] == 'unittest.mock.Mock:' assert response.data['error_name'] == 'APIException' assert response.data['detail'] == {'data': 'test'}", "response = common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:' assert response.data['error_name'] == 'APIException' assert", "= APIException({'data': 'test'}) response = common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:' assert response.data['error_name']", "== 'unittest.mock.Mock:' assert response.data['error_name'] == 'APIException' assert response.data['detail'] == {'data': 'test'} def test_common_exception_handler_if_error_is_string(mocker):", "response.data['error_name'] == 'APIException' assert response.data['detail'] == {'data': 'test'} def test_common_exception_handler_if_error_is_string(mocker): exp = APIException(['testing", "== {'data': 'test'} def test_common_exception_handler_if_error_is_string(mocker): exp = APIException(['testing error']) response = 
common_exception_handler(exp, mocker.Mock())", "'test'} def test_common_exception_handler_if_error_is_string(mocker): exp = APIException(['testing error']) response = common_exception_handler(exp, mocker.Mock()) assert response.data['service_name']", "APIException(['testing error']) response = common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:' assert response.data['error_name'] ==", "response.data['detail'] == {'data': 'test'} def test_common_exception_handler_if_error_is_string(mocker): exp = APIException(['testing error']) response = common_exception_handler(exp,", "assert response.data['detail'] == {'data': 'test'} def test_common_exception_handler_if_error_is_string(mocker): exp = APIException(['testing error']) response =", "import APIException from core.exceptions import common_exception_handler def test_common_exception_handler_if_error_without_detail(mocker): exp = APIException({'data': 'test'}) response", "APIException({'data': 'test'}) response = common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:' assert response.data['error_name'] ==", "common_exception_handler def test_common_exception_handler_if_error_without_detail(mocker): exp = APIException({'data': 'test'}) response = common_exception_handler(exp, mocker.Mock()) assert response.data['service_name']", "mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:' assert response.data['error_name'] == 'APIException' assert response.data['detail'] == {'data':", "== 'APIException' assert response.data['detail'] == {'data': 'test'} def test_common_exception_handler_if_error_is_string(mocker): exp = APIException(['testing error'])", "from core.exceptions import common_exception_handler def test_common_exception_handler_if_error_without_detail(mocker): exp = APIException({'data': 'test'}) response = common_exception_handler(exp,", "assert response.data['service_name'] == 'unittest.mock.Mock:' assert response.data['error_name'] == 'APIException' assert response.data['detail'] == ['testing error']", "error']) response = common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:' assert response.data['error_name'] == 'APIException'", "= APIException(['testing error']) response = common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:' assert response.data['error_name']", "import common_exception_handler def test_common_exception_handler_if_error_without_detail(mocker): exp = APIException({'data': 'test'}) response = common_exception_handler(exp, mocker.Mock()) assert", "APIException from core.exceptions import common_exception_handler def test_common_exception_handler_if_error_without_detail(mocker): exp = APIException({'data': 'test'}) response =", "response.data['service_name'] == 'unittest.mock.Mock:' assert response.data['error_name'] == 'APIException' assert response.data['detail'] == {'data': 'test'} def", "core.exceptions import common_exception_handler def test_common_exception_handler_if_error_without_detail(mocker): exp = APIException({'data': 'test'}) response = common_exception_handler(exp, mocker.Mock())", "exp = APIException({'data': 'test'}) response = common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:' assert", "test_common_exception_handler_if_error_without_detail(mocker): exp = APIException({'data': 'test'}) response = 
common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:'", "= common_exception_handler(exp, mocker.Mock()) assert response.data['service_name'] == 'unittest.mock.Mock:' assert response.data['error_name'] == 'APIException' assert response.data['detail']" ]
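# The tests above pin down the response shape but not the handler itself.
# A minimal sketch that would satisfy them, assuming service_name is derived
# from the context object's class (hence 'unittest.mock.Mock:' when the tests
# pass a Mock); the real core.exceptions implementation is not shown here.
from rest_framework.views import exception_handler


def common_exception_handler(exc, context):
    # Let DRF build the base Response, then reshape its payload.
    response = exception_handler(exc, context)
    if response is not None:
        response.data = {
            'service_name': f'{context.__class__.__module__}.{context.__class__.__name__}:',
            'error_name': exc.__class__.__name__,
            'detail': exc.detail,
        }
    return response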
[ "list.\"\"\" accounts = api.Account.list().all() assert list(accounts) def test_account(self, account): \"\"\"Test account list.\"\"\" assert", "account list.\"\"\" accounts = api.Account.list().all() assert list(accounts) def test_account(self, account): \"\"\"Test account list.\"\"\"", "from dynaconf import settings class TestAccount: \"\"\"Test Account.\"\"\" def test_account_list(self, api): \"\"\"Test account", "test_account_list(self, api): \"\"\"Test account list.\"\"\" accounts = api.Account.list().all() assert list(accounts) def test_account(self, account):", "class TestAccount: \"\"\"Test Account.\"\"\" def test_account_list(self, api): \"\"\"Test account list.\"\"\" accounts = api.Account.list().all()", "accounts = api.Account.list().all() assert list(accounts) def test_account(self, account): \"\"\"Test account list.\"\"\" assert account.account.name", "= api.Account.list().all() assert list(accounts) def test_account(self, account): \"\"\"Test account list.\"\"\" assert account.account.name ==", "TestAccount: \"\"\"Test Account.\"\"\" def test_account_list(self, api): \"\"\"Test account list.\"\"\" accounts = api.Account.list().all() assert", "\"\"\"Test Account.\"\"\" def test_account_list(self, api): \"\"\"Test account list.\"\"\" accounts = api.Account.list().all() assert list(accounts)", "import settings class TestAccount: \"\"\"Test Account.\"\"\" def test_account_list(self, api): \"\"\"Test account list.\"\"\" accounts", "<filename>tests/service/test_account.py from dynaconf import settings class TestAccount: \"\"\"Test Account.\"\"\" def test_account_list(self, api): \"\"\"Test", "def test_account_list(self, api): \"\"\"Test account list.\"\"\" accounts = api.Account.list().all() assert list(accounts) def test_account(self,", "api): \"\"\"Test account list.\"\"\" accounts = api.Account.list().all() assert list(accounts) def test_account(self, account): \"\"\"Test", "Account.\"\"\" def test_account_list(self, api): \"\"\"Test account list.\"\"\" accounts = api.Account.list().all() assert list(accounts) def", "dynaconf import settings class TestAccount: \"\"\"Test Account.\"\"\" def test_account_list(self, api): \"\"\"Test account list.\"\"\"", "\"\"\"Test account list.\"\"\" accounts = api.Account.list().all() assert list(accounts) def test_account(self, account): \"\"\"Test account", "api.Account.list().all() assert list(accounts) def test_account(self, account): \"\"\"Test account list.\"\"\" assert account.account.name == settings.TEST_ACCOUNT_NAME", "settings class TestAccount: \"\"\"Test Account.\"\"\" def test_account_list(self, api): \"\"\"Test account list.\"\"\" accounts =" ]
[ "'base', 'parent') def __init__(self, size: int, reverse: bool, base: int=0, parent: int=0, *args):", "self.base = base self.parent = parent class RepeaterPlaceholder(Token): __slots__ = ('value',) def __init__(self,", "index: int=None, *args): super(Field, self).__init__(*args) self.index = index self.name = name class Operator(Token):", "class Operator(Token): __slots__ = ('operator',) def __init__(self, operator: str, *args): super(Operator, self).__init__(*args) self.operator", "*args): super(Field, self).__init__(*args) self.index = index self.name = name class Operator(Token): __slots__ =", "__slots__ = ('value',) def __init__(self, value: str=None, *args): super(RepeaterPlaceholder, self).__init__(*args) self.value = value", "Literal(Token): __slots__ = ('value',) def __init__(self, value: str, *args): super(Literal, self).__init__(*args) self.value =", "__init__(self, start: int=None, end: int=None): self.start = start self.end = end @property def", "('single', ) def __init__(self, single: bool, *args): super(Quote, self).__init__(*args) self.single = single class", "class Repeater(Token): __slots__ = ('count', 'value', 'implicit') def __init__(self, count: int, value: int,", "super(RepeaterPlaceholder, self).__init__(*args) self.value = value class Field(Token): __slots__ = ('name', 'index') def __init__(self,", "self.count = count self.value = value self.implicit = implicit class RepeaterNumber(Token): __slots__ =", "dir(self) if not k.startswith('__') and k != 'to_json']) class Repeater(Token): __slots__ = ('count',", "Repeater(Token): __slots__ = ('count', 'value', 'implicit') def __init__(self, count: int, value: int, implicit:", "'implicit') def __init__(self, count: int, value: int, implicit: bool=False, *args): super(Repeater, self).__init__(*args) self.count", "'context') def __init__(self, is_open: bool, context: str, *args): super(Bracket, self).__init__(*args) self.open = is_open", "= is_open self.context = context class Quote(Token): __slots__ = ('single', ) def __init__(self,", "= ('count', 'value', 'implicit') def __init__(self, count: int, value: int, implicit: bool=False, *args):", "super(Repeater, self).__init__(*args) self.count = count self.value = value self.implicit = implicit class RepeaterNumber(Token):", "*args): super(Bracket, self).__init__(*args) self.open = is_open self.context = context class Quote(Token): __slots__ =", "__slots__ = ('single', ) def __init__(self, single: bool, *args): super(Quote, self).__init__(*args) self.single =", "size self.reverse = reverse self.base = base self.parent = parent class RepeaterPlaceholder(Token): __slots__", "__slots__ = ('open', 'context') def __init__(self, is_open: bool, context: str, *args): super(Bracket, self).__init__(*args)", "to_json(self): return dict([(k, self.__getattribute__(k)) for k in dir(self) if not k.startswith('__') and k", "def to_json(self): return dict([(k, self.__getattribute__(k)) for k in dir(self) if not k.startswith('__') and", "class Field(Token): __slots__ = ('name', 'index') def __init__(self, name: str, index: int=None, *args):", "def __init__(self, value: str, *args): super(Literal, self).__init__(*args) self.value = value class WhiteSpace(Token): pass", "= ('open', 'context') def __init__(self, is_open: bool, context: str, *args): super(Bracket, self).__init__(*args) self.open", "is_open: bool, context: str, *args): super(Bracket, self).__init__(*args) self.open = is_open self.context = context", "__init__(self, value: str=None, *args): super(RepeaterPlaceholder, 
self).__init__(*args) self.value = value class Field(Token): __slots__ =", "single class Literal(Token): __slots__ = ('value',) def __init__(self, value: str, *args): super(Literal, self).__init__(*args)", "__slots__ = ('name', 'index') def __init__(self, name: str, index: int=None, *args): super(Field, self).__init__(*args)", "value: int, implicit: bool=False, *args): super(Repeater, self).__init__(*args) self.count = count self.value = value", "= value class Field(Token): __slots__ = ('name', 'index') def __init__(self, name: str, index:", "= reverse self.base = base self.parent = parent class RepeaterPlaceholder(Token): __slots__ = ('value',)", "bool=False, *args): super(Repeater, self).__init__(*args) self.count = count self.value = value self.implicit = implicit", "size: int, reverse: bool, base: int=0, parent: int=0, *args): super(RepeaterNumber, self).__init__(*args) self.size =", "bool, base: int=0, parent: int=0, *args): super(RepeaterNumber, self).__init__(*args) self.size = size self.reverse =", "value: str=None, *args): super(RepeaterPlaceholder, self).__init__(*args) self.value = value class Field(Token): __slots__ = ('name',", "self).__init__(*args) self.index = index self.name = name class Operator(Token): __slots__ = ('operator',) def", "super(Quote, self).__init__(*args) self.single = single class Literal(Token): __slots__ = ('value',) def __init__(self, value:", "self.start = start self.end = end @property def type(self): \"Type of current token\"", "@property def type(self): \"Type of current token\" return self.__class__.__name__ def to_json(self): return dict([(k,", "Operator(Token): __slots__ = ('operator',) def __init__(self, operator: str, *args): super(Operator, self).__init__(*args) self.operator =", "Token: __slots__ = ('start', 'end') def __init__(self, start: int=None, end: int=None): self.start =", "RepeaterNumber(Token): __slots__ = ('size', 'reverse', 'base', 'parent') def __init__(self, size: int, reverse: bool,", "= ('size', 'reverse', 'base', 'parent') def __init__(self, size: int, reverse: bool, base: int=0,", "context: str, *args): super(Bracket, self).__init__(*args) self.open = is_open self.context = context class Quote(Token):", "class Literal(Token): __slots__ = ('value',) def __init__(self, value: str, *args): super(Literal, self).__init__(*args) self.value", "= name class Operator(Token): __slots__ = ('operator',) def __init__(self, operator: str, *args): super(Operator,", "base: int=0, parent: int=0, *args): super(RepeaterNumber, self).__init__(*args) self.size = size self.reverse = reverse", "token\" return self.__class__.__name__ def to_json(self): return dict([(k, self.__getattribute__(k)) for k in dir(self) if", "int=0, parent: int=0, *args): super(RepeaterNumber, self).__init__(*args) self.size = size self.reverse = reverse self.base", "str, index: int=None, *args): super(Field, self).__init__(*args) self.index = index self.name = name class", "__init__(self, count: int, value: int, implicit: bool=False, *args): super(Repeater, self).__init__(*args) self.count = count", "= operator class Bracket(Token): __slots__ = ('open', 'context') def __init__(self, is_open: bool, context:", "and k != 'to_json']) class Repeater(Token): __slots__ = ('count', 'value', 'implicit') def __init__(self,", "int, implicit: bool=False, *args): super(Repeater, self).__init__(*args) self.count = count self.value = value self.implicit", "k != 'to_json']) class Repeater(Token): __slots__ = ('count', 'value', 'implicit') def __init__(self, count:", "= context class 
Quote(Token): __slots__ = ('single', ) def __init__(self, single: bool, *args):", "class Quote(Token): __slots__ = ('single', ) def __init__(self, single: bool, *args): super(Quote, self).__init__(*args)", "= ('single', ) def __init__(self, single: bool, *args): super(Quote, self).__init__(*args) self.single = single", "self.parent = parent class RepeaterPlaceholder(Token): __slots__ = ('value',) def __init__(self, value: str=None, *args):", "def __init__(self, is_open: bool, context: str, *args): super(Bracket, self).__init__(*args) self.open = is_open self.context", "def __init__(self, single: bool, *args): super(Quote, self).__init__(*args) self.single = single class Literal(Token): __slots__", "*args): super(Repeater, self).__init__(*args) self.count = count self.value = value self.implicit = implicit class", "context class Quote(Token): __slots__ = ('single', ) def __init__(self, single: bool, *args): super(Quote,", "= parent class RepeaterPlaceholder(Token): __slots__ = ('value',) def __init__(self, value: str=None, *args): super(RepeaterPlaceholder,", "= index self.name = name class Operator(Token): __slots__ = ('operator',) def __init__(self, operator:", "= count self.value = value self.implicit = implicit class RepeaterNumber(Token): __slots__ = ('size',", "self.context = context class Quote(Token): __slots__ = ('single', ) def __init__(self, single: bool,", "type(self): \"Type of current token\" return self.__class__.__name__ def to_json(self): return dict([(k, self.__getattribute__(k)) for", "k in dir(self) if not k.startswith('__') and k != 'to_json']) class Repeater(Token): __slots__", "class Token: __slots__ = ('start', 'end') def __init__(self, start: int=None, end: int=None): self.start", "value class Field(Token): __slots__ = ('name', 'index') def __init__(self, name: str, index: int=None,", "('start', 'end') def __init__(self, start: int=None, end: int=None): self.start = start self.end =", "def __init__(self, operator: str, *args): super(Operator, self).__init__(*args) self.operator = operator class Bracket(Token): __slots__", ") def __init__(self, single: bool, *args): super(Quote, self).__init__(*args) self.single = single class Literal(Token):", "*args): super(Operator, self).__init__(*args) self.operator = operator class Bracket(Token): __slots__ = ('open', 'context') def", "def __init__(self, count: int, value: int, implicit: bool=False, *args): super(Repeater, self).__init__(*args) self.count =", "self.value = value self.implicit = implicit class RepeaterNumber(Token): __slots__ = ('size', 'reverse', 'base',", "= implicit class RepeaterNumber(Token): __slots__ = ('size', 'reverse', 'base', 'parent') def __init__(self, size:", "def __init__(self, size: int, reverse: bool, base: int=0, parent: int=0, *args): super(RepeaterNumber, self).__init__(*args)", "bool, *args): super(Quote, self).__init__(*args) self.single = single class Literal(Token): __slots__ = ('value',) def", "*args): super(RepeaterPlaceholder, self).__init__(*args) self.value = value class Field(Token): __slots__ = ('name', 'index') def", "return self.__class__.__name__ def to_json(self): return dict([(k, self.__getattribute__(k)) for k in dir(self) if not", "super(Field, self).__init__(*args) self.index = index self.name = name class Operator(Token): __slots__ = ('operator',)", "base self.parent = parent class RepeaterPlaceholder(Token): __slots__ = ('value',) def __init__(self, value: str=None,", "'index') def __init__(self, name: str, index: int=None, *args): super(Field, self).__init__(*args) 
self.index = index", "self).__init__(*args) self.open = is_open self.context = context class Quote(Token): __slots__ = ('single', )", "def type(self): \"Type of current token\" return self.__class__.__name__ def to_json(self): return dict([(k, self.__getattribute__(k))", "k.startswith('__') and k != 'to_json']) class Repeater(Token): __slots__ = ('count', 'value', 'implicit') def", "__init__(self, name: str, index: int=None, *args): super(Field, self).__init__(*args) self.index = index self.name =", "int=None, end: int=None): self.start = start self.end = end @property def type(self): \"Type", "start: int=None, end: int=None): self.start = start self.end = end @property def type(self):", "int=None, *args): super(Field, self).__init__(*args) self.index = index self.name = name class Operator(Token): __slots__", "__init__(self, operator: str, *args): super(Operator, self).__init__(*args) self.operator = operator class Bracket(Token): __slots__ =", "'parent') def __init__(self, size: int, reverse: bool, base: int=0, parent: int=0, *args): super(RepeaterNumber,", "str, *args): super(Bracket, self).__init__(*args) self.open = is_open self.context = context class Quote(Token): __slots__", "self.__class__.__name__ def to_json(self): return dict([(k, self.__getattribute__(k)) for k in dir(self) if not k.startswith('__')", "('count', 'value', 'implicit') def __init__(self, count: int, value: int, implicit: bool=False, *args): super(Repeater,", "super(Operator, self).__init__(*args) self.operator = operator class Bracket(Token): __slots__ = ('open', 'context') def __init__(self,", "single: bool, *args): super(Quote, self).__init__(*args) self.single = single class Literal(Token): __slots__ = ('value',)", "parent class RepeaterPlaceholder(Token): __slots__ = ('value',) def __init__(self, value: str=None, *args): super(RepeaterPlaceholder, self).__init__(*args)", "self).__init__(*args) self.count = count self.value = value self.implicit = implicit class RepeaterNumber(Token): __slots__", "= ('name', 'index') def __init__(self, name: str, index: int=None, *args): super(Field, self).__init__(*args) self.index", "operator class Bracket(Token): __slots__ = ('open', 'context') def __init__(self, is_open: bool, context: str,", "__slots__ = ('start', 'end') def __init__(self, start: int=None, end: int=None): self.start = start", "'to_json']) class Repeater(Token): __slots__ = ('count', 'value', 'implicit') def __init__(self, count: int, value:", "Bracket(Token): __slots__ = ('open', 'context') def __init__(self, is_open: bool, context: str, *args): super(Bracket,", "self).__init__(*args) self.value = value class Field(Token): __slots__ = ('name', 'index') def __init__(self, name:", "self).__init__(*args) self.single = single class Literal(Token): __slots__ = ('value',) def __init__(self, value: str,", "for k in dir(self) if not k.startswith('__') and k != 'to_json']) class Repeater(Token):", "('name', 'index') def __init__(self, name: str, index: int=None, *args): super(Field, self).__init__(*args) self.index =", "__slots__ = ('operator',) def __init__(self, operator: str, *args): super(Operator, self).__init__(*args) self.operator = operator", "= start self.end = end @property def type(self): \"Type of current token\" return", "int, reverse: bool, base: int=0, parent: int=0, *args): super(RepeaterNumber, self).__init__(*args) self.size = size", "('value',) def __init__(self, value: str=None, *args): super(RepeaterPlaceholder, self).__init__(*args) self.value = value class Field(Token):", "self.open = 
is_open self.context = context class Quote(Token): __slots__ = ('single', ) def", "end @property def type(self): \"Type of current token\" return self.__class__.__name__ def to_json(self): return", "int=0, *args): super(RepeaterNumber, self).__init__(*args) self.size = size self.reverse = reverse self.base = base", "name class Operator(Token): __slots__ = ('operator',) def __init__(self, operator: str, *args): super(Operator, self).__init__(*args)", "self.operator = operator class Bracket(Token): __slots__ = ('open', 'context') def __init__(self, is_open: bool,", "'value', 'implicit') def __init__(self, count: int, value: int, implicit: bool=False, *args): super(Repeater, self).__init__(*args)", "operator: str, *args): super(Operator, self).__init__(*args) self.operator = operator class Bracket(Token): __slots__ = ('open',", "return dict([(k, self.__getattribute__(k)) for k in dir(self) if not k.startswith('__') and k !=", "self.name = name class Operator(Token): __slots__ = ('operator',) def __init__(self, operator: str, *args):", "'reverse', 'base', 'parent') def __init__(self, size: int, reverse: bool, base: int=0, parent: int=0,", "dict([(k, self.__getattribute__(k)) for k in dir(self) if not k.startswith('__') and k != 'to_json'])", "start self.end = end @property def type(self): \"Type of current token\" return self.__class__.__name__", "= ('value',) def __init__(self, value: str=None, *args): super(RepeaterPlaceholder, self).__init__(*args) self.value = value class", "__slots__ = ('value',) def __init__(self, value: str, *args): super(Literal, self).__init__(*args) self.value = value", "= end @property def type(self): \"Type of current token\" return self.__class__.__name__ def to_json(self):", "*args): super(Quote, self).__init__(*args) self.single = single class Literal(Token): __slots__ = ('value',) def __init__(self,", "def __init__(self, name: str, index: int=None, *args): super(Field, self).__init__(*args) self.index = index self.name", "__init__(self, is_open: bool, context: str, *args): super(Bracket, self).__init__(*args) self.open = is_open self.context =", "reverse: bool, base: int=0, parent: int=0, *args): super(RepeaterNumber, self).__init__(*args) self.size = size self.reverse", "def __init__(self, start: int=None, end: int=None): self.start = start self.end = end @property", "class RepeaterPlaceholder(Token): __slots__ = ('value',) def __init__(self, value: str=None, *args): super(RepeaterPlaceholder, self).__init__(*args) self.value", "= size self.reverse = reverse self.base = base self.parent = parent class RepeaterPlaceholder(Token):", "count: int, value: int, implicit: bool=False, *args): super(Repeater, self).__init__(*args) self.count = count self.value", "super(RepeaterNumber, self).__init__(*args) self.size = size self.reverse = reverse self.base = base self.parent =", "super(Bracket, self).__init__(*args) self.open = is_open self.context = context class Quote(Token): __slots__ = ('single',", "int, value: int, implicit: bool=False, *args): super(Repeater, self).__init__(*args) self.count = count self.value =", "current token\" return self.__class__.__name__ def to_json(self): return dict([(k, self.__getattribute__(k)) for k in dir(self)", "RepeaterPlaceholder(Token): __slots__ = ('value',) def __init__(self, value: str=None, *args): super(RepeaterPlaceholder, self).__init__(*args) self.value =", "Quote(Token): __slots__ = ('single', ) def __init__(self, single: bool, *args): super(Quote, self).__init__(*args) self.single", "def __init__(self, value: str=None, 
*args): super(RepeaterPlaceholder, self).__init__(*args) self.value = value class Field(Token): __slots__", "of current token\" return self.__class__.__name__ def to_json(self): return dict([(k, self.__getattribute__(k)) for k in", "= value self.implicit = implicit class RepeaterNumber(Token): __slots__ = ('size', 'reverse', 'base', 'parent')", "count self.value = value self.implicit = implicit class RepeaterNumber(Token): __slots__ = ('size', 'reverse',", "class Bracket(Token): __slots__ = ('open', 'context') def __init__(self, is_open: bool, context: str, *args):", "str=None, *args): super(RepeaterPlaceholder, self).__init__(*args) self.value = value class Field(Token): __slots__ = ('name', 'index')", "self).__init__(*args) self.operator = operator class Bracket(Token): __slots__ = ('open', 'context') def __init__(self, is_open:", "self.single = single class Literal(Token): __slots__ = ('value',) def __init__(self, value: str, *args):", "not k.startswith('__') and k != 'to_json']) class Repeater(Token): __slots__ = ('count', 'value', 'implicit')", "*args): super(RepeaterNumber, self).__init__(*args) self.size = size self.reverse = reverse self.base = base self.parent", "implicit class RepeaterNumber(Token): __slots__ = ('size', 'reverse', 'base', 'parent') def __init__(self, size: int,", "= ('start', 'end') def __init__(self, start: int=None, end: int=None): self.start = start self.end", "\"Type of current token\" return self.__class__.__name__ def to_json(self): return dict([(k, self.__getattribute__(k)) for k", "= ('operator',) def __init__(self, operator: str, *args): super(Operator, self).__init__(*args) self.operator = operator class", "('operator',) def __init__(self, operator: str, *args): super(Operator, self).__init__(*args) self.operator = operator class Bracket(Token):", "self).__init__(*args) self.size = size self.reverse = reverse self.base = base self.parent = parent", "= ('value',) def __init__(self, value: str, *args): super(Literal, self).__init__(*args) self.value = value class", "bool, context: str, *args): super(Bracket, self).__init__(*args) self.open = is_open self.context = context class", "int=None): self.start = start self.end = end @property def type(self): \"Type of current", "self.__getattribute__(k)) for k in dir(self) if not k.startswith('__') and k != 'to_json']) class", "in dir(self) if not k.startswith('__') and k != 'to_json']) class Repeater(Token): __slots__ =", "name: str, index: int=None, *args): super(Field, self).__init__(*args) self.index = index self.name = name", "__slots__ = ('count', 'value', 'implicit') def __init__(self, count: int, value: int, implicit: bool=False,", "value self.implicit = implicit class RepeaterNumber(Token): __slots__ = ('size', 'reverse', 'base', 'parent') def", "is_open self.context = context class Quote(Token): __slots__ = ('single', ) def __init__(self, single:", "reverse self.base = base self.parent = parent class RepeaterPlaceholder(Token): __slots__ = ('value',) def", "self.value = value class Field(Token): __slots__ = ('name', 'index') def __init__(self, name: str,", "self.size = size self.reverse = reverse self.base = base self.parent = parent class", "= base self.parent = parent class RepeaterPlaceholder(Token): __slots__ = ('value',) def __init__(self, value:", "__init__(self, single: bool, *args): super(Quote, self).__init__(*args) self.single = single class Literal(Token): __slots__ =", "!= 'to_json']) class Repeater(Token): __slots__ = ('count', 'value', 'implicit') def __init__(self, count: int,", "index 
self.name = name class Operator(Token): __slots__ = ('operator',) def __init__(self, operator: str,", "self.reverse = reverse self.base = base self.parent = parent class RepeaterPlaceholder(Token): __slots__ =", "self.end = end @property def type(self): \"Type of current token\" return self.__class__.__name__ def", "('open', 'context') def __init__(self, is_open: bool, context: str, *args): super(Bracket, self).__init__(*args) self.open =", "self.implicit = implicit class RepeaterNumber(Token): __slots__ = ('size', 'reverse', 'base', 'parent') def __init__(self,", "str, *args): super(Operator, self).__init__(*args) self.operator = operator class Bracket(Token): __slots__ = ('open', 'context')", "end: int=None): self.start = start self.end = end @property def type(self): \"Type of", "if not k.startswith('__') and k != 'to_json']) class Repeater(Token): __slots__ = ('count', 'value',", "__init__(self, size: int, reverse: bool, base: int=0, parent: int=0, *args): super(RepeaterNumber, self).__init__(*args) self.size", "('value',) def __init__(self, value: str, *args): super(Literal, self).__init__(*args) self.value = value class WhiteSpace(Token):", "= single class Literal(Token): __slots__ = ('value',) def __init__(self, value: str, *args): super(Literal,", "__slots__ = ('size', 'reverse', 'base', 'parent') def __init__(self, size: int, reverse: bool, base:", "parent: int=0, *args): super(RepeaterNumber, self).__init__(*args) self.size = size self.reverse = reverse self.base =", "Field(Token): __slots__ = ('name', 'index') def __init__(self, name: str, index: int=None, *args): super(Field,", "class RepeaterNumber(Token): __slots__ = ('size', 'reverse', 'base', 'parent') def __init__(self, size: int, reverse:", "'end') def __init__(self, start: int=None, end: int=None): self.start = start self.end = end", "('size', 'reverse', 'base', 'parent') def __init__(self, size: int, reverse: bool, base: int=0, parent:", "implicit: bool=False, *args): super(Repeater, self).__init__(*args) self.count = count self.value = value self.implicit =", "self.index = index self.name = name class Operator(Token): __slots__ = ('operator',) def __init__(self," ]
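# Usage sketch (values invented): every token records its start/end span via
# the Token base class, and to_json() serialises the public attributes plus
# the computed type.
field = Field('title', 1, 0, 5)   # name, index, then start/end through *args
assert field.type == 'Field'
assert field.to_json() == {'end': 5, 'index': 1, 'name': 'title', 'start': 0, 'type': 'Field'}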
[ "import * from thermodynamics import * from dynamics import * from box import", "* from dynamics import * from box import * from stats import *", "thermodynamics import * from dynamics import * from box import * from stats", "* from box import * from stats import * from meta import *", "import * from box import * from stats import * from meta import", "from dynamics import * from box import * from stats import * from", "dynamics import * from box import * from stats import * from meta", "from radioactivity import * from thermodynamics import * from dynamics import * from", "* from thermodynamics import * from dynamics import * from box import *", "import * from dynamics import * from box import * from stats import", "radioactivity import * from thermodynamics import * from dynamics import * from box", "from thermodynamics import * from dynamics import * from box import * from" ]
[ "final = price - discount print(f'The final price is: {final} lv.\\nThe discount is:", "* 7.61 discount = price * 0.18 final = price - discount print(f'The", "= price * 0.18 final = price - discount print(f'The final price is:", "price = float(input()) * 7.61 discount = price * 0.18 final = price", "price * 0.18 final = price - discount print(f'The final price is: {final}", "* 0.18 final = price - discount print(f'The final price is: {final} lv.\\nThe", "0.18 final = price - discount print(f'The final price is: {final} lv.\\nThe discount", "price - discount print(f'The final price is: {final} lv.\\nThe discount is: {discount} lv.')", "float(input()) * 7.61 discount = price * 0.18 final = price - discount", "= float(input()) * 7.61 discount = price * 0.18 final = price -", "<gh_stars>0 price = float(input()) * 7.61 discount = price * 0.18 final =", "discount = price * 0.18 final = price - discount print(f'The final price", "7.61 discount = price * 0.18 final = price - discount print(f'The final", "= price - discount print(f'The final price is: {final} lv.\\nThe discount is: {discount}" ]
[ "= 'message', text_color: str = None) -> None: if text_color: self.statusBar().setStyleSheet(f'color : {text_color}')", "running_tasks = [executor.submit(self.process, torrent) for torrent in torrents] for running_task in running_tasks: running_task.result()", "files = QFileDialog.getOpenFileNames(self, 'Open a file', '', 'All Files (*.*)') files = files[0]", "_type == 'message': text_color = 'black' else: text_color = 'black' self.statusBar().setStyleSheet(f'color : {text_color}')", "if _type == 'error': text_color = 'red' elif _type == 'message': text_color =", "__init__(self, parent=None, **kwargs): super().__init__(parent) self.setupUi(self) # Triggers self.action_Add_Files.triggered.connect(self.add_files) self.action_Exit.triggered.connect(self.close) def retranslateUi(self, MainFrame): super().retranslateUi(MainFrame)", "None: if text_color: self.statusBar().setStyleSheet(f'color : {text_color}') self.statusbar.showMessage(message) return None if _type == 'error':", "for file in files: try: torrents.append(Torrent(file)) except Exception as e: broken_files.append(pathlib.Path(file).name) pass if", "self.statusBar().setStyleSheet(f'color : {text_color}') self.statusbar.showMessage(message) return None if _type == 'error': text_color = 'red'", "Triggers self.action_Add_Files.triggered.connect(self.add_files) self.action_Exit.triggered.connect(self.close) def retranslateUi(self, MainFrame): super().retranslateUi(MainFrame) MainFrame.setWindowTitle(settings.NAME.capitalize()) def closeEvent(self, event) -> None:", "QFileDialog import threading from clutcher import settings from torrent.structure.torrent import Torrent from ui.generated", "be not self.tr, but translate because must be an event! reply = QMessageBox.question(self,", "event.ignore() def process(self, torrent: Torrent) -> None: # self.save_to_database(torrent) print(f'Task Executed {threading.current_thread()}') def", "-> None: super().closeEvent(event) # TODO: Should be not self.tr, but translate because must", "self.statusbar.showMessage(message) return None if _type == 'error': text_color = 'red' elif _type ==", "in files: try: torrents.append(Torrent(file)) except Exception as e: broken_files.append(pathlib.Path(file).name) pass if broken_files: self.show_message(f'Errors", "pass if broken_files: self.show_message(f'Errors for files: {\", \".join(broken_files)} have occurred.', _type='error') with ThreadPoolExecutor()", "Ui_MainFrame class MainFrame(QMainWindow, Ui_MainFrame): def __init__(self, parent=None, **kwargs): super().__init__(parent) self.setupUi(self) # Triggers self.action_Add_Files.triggered.connect(self.add_files)", "'', 'All Files (*.*)') files = files[0] broken_files = [] torrents = []", "files = files[0] broken_files = [] torrents = [] for file in files:", "event! 
reply = QMessageBox.question(self, 'Message', self.tr('Are you sure to quit?'), QMessageBox.Yes, QMessageBox.No) if", "message: str, _type: str = 'message', text_color: str = None) -> None: if", "threading from clutcher import settings from torrent.structure.torrent import Torrent from ui.generated import Ui_MainFrame", "# self.save_to_database(torrent) print(f'Task Executed {threading.current_thread()}') def add_files(self) -> None: files = QFileDialog.getOpenFileNames(self, 'Open", "str, _type: str = 'message', text_color: str = None) -> None: if text_color:", "def add_files(self) -> None: files = QFileDialog.getOpenFileNames(self, 'Open a file', '', 'All Files", "'All Files (*.*)') files = files[0] broken_files = [] torrents = [] for", "QMessageBox, QFileDialog import threading from clutcher import settings from torrent.structure.torrent import Torrent from", "'Message', self.tr('Are you sure to quit?'), QMessageBox.Yes, QMessageBox.No) if reply == QMessageBox.Yes: event.accept()", "from ui.generated import Ui_MainFrame class MainFrame(QMainWindow, Ui_MainFrame): def __init__(self, parent=None, **kwargs): super().__init__(parent) self.setupUi(self)", "str = 'message', text_color: str = None) -> None: if text_color: self.statusBar().setStyleSheet(f'color :", "= files[0] broken_files = [] torrents = [] for file in files: try:", "class MainFrame(QMainWindow, Ui_MainFrame): def __init__(self, parent=None, **kwargs): super().__init__(parent) self.setupUi(self) # Triggers self.action_Add_Files.triggered.connect(self.add_files) self.action_Exit.triggered.connect(self.close)", "a file', '', 'All Files (*.*)') files = files[0] broken_files = [] torrents", "clutcher import settings from torrent.structure.torrent import Torrent from ui.generated import Ui_MainFrame class MainFrame(QMainWindow,", "for running_task in running_tasks: running_task.result() def show_message(self, message: str, _type: str = 'message',", "not self.tr, but translate because must be an event! 
reply = QMessageBox.question(self, 'Message',", "retranslateUi(self, MainFrame): super().retranslateUi(MainFrame) MainFrame.setWindowTitle(settings.NAME.capitalize()) def closeEvent(self, event) -> None: super().closeEvent(event) # TODO: Should", "else: event.ignore() def process(self, torrent: Torrent) -> None: # self.save_to_database(torrent) print(f'Task Executed {threading.current_thread()}')", "torrents = [] for file in files: try: torrents.append(Torrent(file)) except Exception as e:", "broken_files.append(pathlib.Path(file).name) pass if broken_files: self.show_message(f'Errors for files: {\", \".join(broken_files)} have occurred.', _type='error') with", "from clutcher import settings from torrent.structure.torrent import Torrent from ui.generated import Ui_MainFrame class", "self.setupUi(self) # Triggers self.action_Add_Files.triggered.connect(self.add_files) self.action_Exit.triggered.connect(self.close) def retranslateUi(self, MainFrame): super().retranslateUi(MainFrame) MainFrame.setWindowTitle(settings.NAME.capitalize()) def closeEvent(self, event)", "None if _type == 'error': text_color = 'red' elif _type == 'message': text_color", "text_color = 'red' elif _type == 'message': text_color = 'black' else: text_color =", "if broken_files: self.show_message(f'Errors for files: {\", \".join(broken_files)} have occurred.', _type='error') with ThreadPoolExecutor() as", "file in files: try: torrents.append(Torrent(file)) except Exception as e: broken_files.append(pathlib.Path(file).name) pass if broken_files:", "_type='error') with ThreadPoolExecutor() as executor: running_tasks = [executor.submit(self.process, torrent) for torrent in torrents]", "self.tr('Are you sure to quit?'), QMessageBox.Yes, QMessageBox.No) if reply == QMessageBox.Yes: event.accept() else:", "in running_tasks: running_task.result() def show_message(self, message: str, _type: str = 'message', text_color: str", "broken_files: self.show_message(f'Errors for files: {\", \".join(broken_files)} have occurred.', _type='error') with ThreadPoolExecutor() as executor:", "super().closeEvent(event) # TODO: Should be not self.tr, but translate because must be an", "= QFileDialog.getOpenFileNames(self, 'Open a file', '', 'All Files (*.*)') files = files[0] broken_files", "-> None: files = QFileDialog.getOpenFileNames(self, 'Open a file', '', 'All Files (*.*)') files", "torrents] for running_task in running_tasks: running_task.result() def show_message(self, message: str, _type: str =", "None: # self.save_to_database(torrent) print(f'Task Executed {threading.current_thread()}') def add_files(self) -> None: files = QFileDialog.getOpenFileNames(self,", "'Open a file', '', 'All Files (*.*)') files = files[0] broken_files = []", "translate because must be an event! 
reply = QMessageBox.question(self, 'Message', self.tr('Are you sure", "= [executor.submit(self.process, torrent) for torrent in torrents] for running_task in running_tasks: running_task.result() def", "Files (*.*)') files = files[0] broken_files = [] torrents = [] for file", "def closeEvent(self, event) -> None: super().closeEvent(event) # TODO: Should be not self.tr, but", "None) -> None: if text_color: self.statusBar().setStyleSheet(f'color : {text_color}') self.statusbar.showMessage(message) return None if _type", "def __init__(self, parent=None, **kwargs): super().__init__(parent) self.setupUi(self) # Triggers self.action_Add_Files.triggered.connect(self.add_files) self.action_Exit.triggered.connect(self.close) def retranslateUi(self, MainFrame):", "self.action_Exit.triggered.connect(self.close) def retranslateUi(self, MainFrame): super().retranslateUi(MainFrame) MainFrame.setWindowTitle(settings.NAME.capitalize()) def closeEvent(self, event) -> None: super().closeEvent(event) #", "QMessageBox.question(self, 'Message', self.tr('Are you sure to quit?'), QMessageBox.Yes, QMessageBox.No) if reply == QMessageBox.Yes:", "import QMainWindow, QMessageBox, QFileDialog import threading from clutcher import settings from torrent.structure.torrent import", "running_tasks: running_task.result() def show_message(self, message: str, _type: str = 'message', text_color: str =", "closeEvent(self, event) -> None: super().closeEvent(event) # TODO: Should be not self.tr, but translate", "file', '', 'All Files (*.*)') files = files[0] broken_files = [] torrents =", "from torrent.structure.torrent import Torrent from ui.generated import Ui_MainFrame class MainFrame(QMainWindow, Ui_MainFrame): def __init__(self,", "try: torrents.append(Torrent(file)) except Exception as e: broken_files.append(pathlib.Path(file).name) pass if broken_files: self.show_message(f'Errors for files:", "QMessageBox.Yes: event.accept() else: event.ignore() def process(self, torrent: Torrent) -> None: # self.save_to_database(torrent) print(f'Task", "Exception as e: broken_files.append(pathlib.Path(file).name) pass if broken_files: self.show_message(f'Errors for files: {\", \".join(broken_files)} have", "'red' elif _type == 'message': text_color = 'black' else: text_color = 'black' self.statusBar().setStyleSheet(f'color", "running_task in running_tasks: running_task.result() def show_message(self, message: str, _type: str = 'message', text_color:", "import threading from clutcher import settings from torrent.structure.torrent import Torrent from ui.generated import", "broken_files = [] torrents = [] for file in files: try: torrents.append(Torrent(file)) except", "QFileDialog.getOpenFileNames(self, 'Open a file', '', 'All Files (*.*)') files = files[0] broken_files =", "{text_color}') self.statusbar.showMessage(message) return None if _type == 'error': text_color = 'red' elif _type", "for torrent in torrents] for running_task in running_tasks: running_task.result() def show_message(self, message: str,", "files: try: torrents.append(Torrent(file)) except Exception as e: broken_files.append(pathlib.Path(file).name) pass if broken_files: self.show_message(f'Errors for", "Should be not self.tr, but translate because must be an event! 
reply =", "occurred.', _type='error') with ThreadPoolExecutor() as executor: running_tasks = [executor.submit(self.process, torrent) for torrent in", "import Ui_MainFrame class MainFrame(QMainWindow, Ui_MainFrame): def __init__(self, parent=None, **kwargs): super().__init__(parent) self.setupUi(self) # Triggers", "[executor.submit(self.process, torrent) for torrent in torrents] for running_task in running_tasks: running_task.result() def show_message(self,", "QMessageBox.Yes, QMessageBox.No) if reply == QMessageBox.Yes: event.accept() else: event.ignore() def process(self, torrent: Torrent)", "Torrent from ui.generated import Ui_MainFrame class MainFrame(QMainWindow, Ui_MainFrame): def __init__(self, parent=None, **kwargs): super().__init__(parent)", "None: super().closeEvent(event) # TODO: Should be not self.tr, but translate because must be", "Executed {threading.current_thread()}') def add_files(self) -> None: files = QFileDialog.getOpenFileNames(self, 'Open a file', '',", "quit?'), QMessageBox.Yes, QMessageBox.No) if reply == QMessageBox.Yes: event.accept() else: event.ignore() def process(self, torrent:", "with ThreadPoolExecutor() as executor: running_tasks = [executor.submit(self.process, torrent) for torrent in torrents] for", "print(f'Task Executed {threading.current_thread()}') def add_files(self) -> None: files = QFileDialog.getOpenFileNames(self, 'Open a file',", "torrents.append(Torrent(file)) except Exception as e: broken_files.append(pathlib.Path(file).name) pass if broken_files: self.show_message(f'Errors for files: {\",", "{threading.current_thread()}') def add_files(self) -> None: files = QFileDialog.getOpenFileNames(self, 'Open a file', '', 'All", "self.action_Add_Files.triggered.connect(self.add_files) self.action_Exit.triggered.connect(self.close) def retranslateUi(self, MainFrame): super().retranslateUi(MainFrame) MainFrame.setWindowTitle(settings.NAME.capitalize()) def closeEvent(self, event) -> None: super().closeEvent(event)", "{\", \".join(broken_files)} have occurred.', _type='error') with ThreadPoolExecutor() as executor: running_tasks = [executor.submit(self.process, torrent)", "if text_color: self.statusBar().setStyleSheet(f'color : {text_color}') self.statusbar.showMessage(message) return None if _type == 'error': text_color", "e: broken_files.append(pathlib.Path(file).name) pass if broken_files: self.show_message(f'Errors for files: {\", \".join(broken_files)} have occurred.', _type='error')", "for files: {\", \".join(broken_files)} have occurred.', _type='error') with ThreadPoolExecutor() as executor: running_tasks =", "= 'red' elif _type == 'message': text_color = 'black' else: text_color = 'black'", "an event! 
reply = QMessageBox.question(self, 'Message', self.tr('Are you sure to quit?'), QMessageBox.Yes, QMessageBox.No)", "PyQt5.QtWidgets import QMainWindow, QMessageBox, QFileDialog import threading from clutcher import settings from torrent.structure.torrent", "as executor: running_tasks = [executor.submit(self.process, torrent) for torrent in torrents] for running_task in", "torrent) for torrent in torrents] for running_task in running_tasks: running_task.result() def show_message(self, message:", "files[0] broken_files = [] torrents = [] for file in files: try: torrents.append(Torrent(file))", "in torrents] for running_task in running_tasks: running_task.result() def show_message(self, message: str, _type: str", "parent=None, **kwargs): super().__init__(parent) self.setupUi(self) # Triggers self.action_Add_Files.triggered.connect(self.add_files) self.action_Exit.triggered.connect(self.close) def retranslateUi(self, MainFrame): super().retranslateUi(MainFrame) MainFrame.setWindowTitle(settings.NAME.capitalize())", "text_color: str = None) -> None: if text_color: self.statusBar().setStyleSheet(f'color : {text_color}') self.statusbar.showMessage(message) return", "MainFrame): super().retranslateUi(MainFrame) MainFrame.setWindowTitle(settings.NAME.capitalize()) def closeEvent(self, event) -> None: super().closeEvent(event) # TODO: Should be", "settings from torrent.structure.torrent import Torrent from ui.generated import Ui_MainFrame class MainFrame(QMainWindow, Ui_MainFrame): def", "but translate because must be an event! reply = QMessageBox.question(self, 'Message', self.tr('Are you", "super().retranslateUi(MainFrame) MainFrame.setWindowTitle(settings.NAME.capitalize()) def closeEvent(self, event) -> None: super().closeEvent(event) # TODO: Should be not", "== QMessageBox.Yes: event.accept() else: event.ignore() def process(self, torrent: Torrent) -> None: # self.save_to_database(torrent)", "event.accept() else: event.ignore() def process(self, torrent: Torrent) -> None: # self.save_to_database(torrent) print(f'Task Executed", "as e: broken_files.append(pathlib.Path(file).name) pass if broken_files: self.show_message(f'Errors for files: {\", \".join(broken_files)} have occurred.',", "event) -> None: super().closeEvent(event) # TODO: Should be not self.tr, but translate because", "torrent: Torrent) -> None: # self.save_to_database(torrent) print(f'Task Executed {threading.current_thread()}') def add_files(self) -> None:", "def retranslateUi(self, MainFrame): super().retranslateUi(MainFrame) MainFrame.setWindowTitle(settings.NAME.capitalize()) def closeEvent(self, event) -> None: super().closeEvent(event) # TODO:", "if reply == QMessageBox.Yes: event.accept() else: event.ignore() def process(self, torrent: Torrent) -> None:", "def show_message(self, message: str, _type: str = 'message', text_color: str = None) ->", "self.save_to_database(torrent) print(f'Task Executed {threading.current_thread()}') def add_files(self) -> None: files = QFileDialog.getOpenFileNames(self, 'Open a", "-> None: # self.save_to_database(torrent) print(f'Task Executed {threading.current_thread()}') def add_files(self) -> None: files =", "except Exception as e: broken_files.append(pathlib.Path(file).name) pass if broken_files: self.show_message(f'Errors for files: {\", \".join(broken_files)}", "pathlib from PyQt5.QtWidgets import QMainWindow, QMessageBox, QFileDialog import threading from clutcher import settings", "import pathlib from PyQt5.QtWidgets import QMainWindow, QMessageBox, QFileDialog import threading from 
clutcher import", "ThreadPoolExecutor import pathlib from PyQt5.QtWidgets import QMainWindow, QMessageBox, QFileDialog import threading from clutcher", "'error': text_color = 'red' elif _type == 'message': text_color = 'black' else: text_color", "running_task.result() def show_message(self, message: str, _type: str = 'message', text_color: str = None)", "to quit?'), QMessageBox.Yes, QMessageBox.No) if reply == QMessageBox.Yes: event.accept() else: event.ignore() def process(self,", "ui.generated import Ui_MainFrame class MainFrame(QMainWindow, Ui_MainFrame): def __init__(self, parent=None, **kwargs): super().__init__(parent) self.setupUi(self) #", "super().__init__(parent) self.setupUi(self) # Triggers self.action_Add_Files.triggered.connect(self.add_files) self.action_Exit.triggered.connect(self.close) def retranslateUi(self, MainFrame): super().retranslateUi(MainFrame) MainFrame.setWindowTitle(settings.NAME.capitalize()) def closeEvent(self,", "self.show_message(f'Errors for files: {\", \".join(broken_files)} have occurred.', _type='error') with ThreadPoolExecutor() as executor: running_tasks", "elif _type == 'message': text_color = 'black' else: text_color = 'black' self.statusBar().setStyleSheet(f'color :", "torrent.structure.torrent import Torrent from ui.generated import Ui_MainFrame class MainFrame(QMainWindow, Ui_MainFrame): def __init__(self, parent=None,", "-> None: if text_color: self.statusBar().setStyleSheet(f'color : {text_color}') self.statusbar.showMessage(message) return None if _type ==", "None: files = QFileDialog.getOpenFileNames(self, 'Open a file', '', 'All Files (*.*)') files =", "_type == 'error': text_color = 'red' elif _type == 'message': text_color = 'black'", "concurrent.futures import ThreadPoolExecutor import pathlib from PyQt5.QtWidgets import QMainWindow, QMessageBox, QFileDialog import threading", "QMainWindow, QMessageBox, QFileDialog import threading from clutcher import settings from torrent.structure.torrent import Torrent", "from PyQt5.QtWidgets import QMainWindow, QMessageBox, QFileDialog import threading from clutcher import settings from", "= [] torrents = [] for file in files: try: torrents.append(Torrent(file)) except Exception", "== 'message': text_color = 'black' else: text_color = 'black' self.statusBar().setStyleSheet(f'color : {text_color}') self.statusbar.showMessage(message)", "torrent in torrents] for running_task in running_tasks: running_task.result() def show_message(self, message: str, _type:", "\".join(broken_files)} have occurred.', _type='error') with ThreadPoolExecutor() as executor: running_tasks = [executor.submit(self.process, torrent) for", "import ThreadPoolExecutor import pathlib from PyQt5.QtWidgets import QMainWindow, QMessageBox, QFileDialog import threading from", "process(self, torrent: Torrent) -> None: # self.save_to_database(torrent) print(f'Task Executed {threading.current_thread()}') def add_files(self) ->", "reply == QMessageBox.Yes: event.accept() else: event.ignore() def process(self, torrent: Torrent) -> None: #", "text_color: self.statusBar().setStyleSheet(f'color : {text_color}') self.statusbar.showMessage(message) return None if _type == 'error': text_color =", "**kwargs): super().__init__(parent) self.setupUi(self) # Triggers self.action_Add_Files.triggered.connect(self.add_files) self.action_Exit.triggered.connect(self.close) def retranslateUi(self, MainFrame): super().retranslateUi(MainFrame) MainFrame.setWindowTitle(settings.NAME.capitalize()) def", "self.tr, but translate because must be an event! 
reply = QMessageBox.question(self, 'Message', self.tr('Are", "MainFrame.setWindowTitle(settings.NAME.capitalize()) def closeEvent(self, event) -> None: super().closeEvent(event) # TODO: Should be not self.tr,", "reply = QMessageBox.question(self, 'Message', self.tr('Are you sure to quit?'), QMessageBox.Yes, QMessageBox.No) if reply", "from concurrent.futures import ThreadPoolExecutor import pathlib from PyQt5.QtWidgets import QMainWindow, QMessageBox, QFileDialog import", "<filename>ui/gui.py<gh_stars>0 from concurrent.futures import ThreadPoolExecutor import pathlib from PyQt5.QtWidgets import QMainWindow, QMessageBox, QFileDialog", "executor: running_tasks = [executor.submit(self.process, torrent) for torrent in torrents] for running_task in running_tasks:", "return None if _type == 'error': text_color = 'red' elif _type == 'message':", "(*.*)') files = files[0] broken_files = [] torrents = [] for file in", "str = None) -> None: if text_color: self.statusBar().setStyleSheet(f'color : {text_color}') self.statusbar.showMessage(message) return None", "add_files(self) -> None: files = QFileDialog.getOpenFileNames(self, 'Open a file', '', 'All Files (*.*)')", "import settings from torrent.structure.torrent import Torrent from ui.generated import Ui_MainFrame class MainFrame(QMainWindow, Ui_MainFrame):", "# Triggers self.action_Add_Files.triggered.connect(self.add_files) self.action_Exit.triggered.connect(self.close) def retranslateUi(self, MainFrame): super().retranslateUi(MainFrame) MainFrame.setWindowTitle(settings.NAME.capitalize()) def closeEvent(self, event) ->", "show_message(self, message: str, _type: str = 'message', text_color: str = None) -> None:", "be an event! reply = QMessageBox.question(self, 'Message', self.tr('Are you sure to quit?'), QMessageBox.Yes,", "Ui_MainFrame): def __init__(self, parent=None, **kwargs): super().__init__(parent) self.setupUi(self) # Triggers self.action_Add_Files.triggered.connect(self.add_files) self.action_Exit.triggered.connect(self.close) def retranslateUi(self,", ": {text_color}') self.statusbar.showMessage(message) return None if _type == 'error': text_color = 'red' elif", "[] torrents = [] for file in files: try: torrents.append(Torrent(file)) except Exception as", "'message', text_color: str = None) -> None: if text_color: self.statusBar().setStyleSheet(f'color : {text_color}') self.statusbar.showMessage(message)", "_type: str = 'message', text_color: str = None) -> None: if text_color: self.statusBar().setStyleSheet(f'color", "= [] for file in files: try: torrents.append(Torrent(file)) except Exception as e: broken_files.append(pathlib.Path(file).name)", "import Torrent from ui.generated import Ui_MainFrame class MainFrame(QMainWindow, Ui_MainFrame): def __init__(self, parent=None, **kwargs):", "# TODO: Should be not self.tr, but translate because must be an event!", "MainFrame(QMainWindow, Ui_MainFrame): def __init__(self, parent=None, **kwargs): super().__init__(parent) self.setupUi(self) # Triggers self.action_Add_Files.triggered.connect(self.add_files) self.action_Exit.triggered.connect(self.close) def", "TODO: Should be not self.tr, but translate because must be an event! 
reply", "you sure to quit?'), QMessageBox.Yes, QMessageBox.No) if reply == QMessageBox.Yes: event.accept() else: event.ignore()", "[] for file in files: try: torrents.append(Torrent(file)) except Exception as e: broken_files.append(pathlib.Path(file).name) pass", "= None) -> None: if text_color: self.statusBar().setStyleSheet(f'color : {text_color}') self.statusbar.showMessage(message) return None if", "Torrent) -> None: # self.save_to_database(torrent) print(f'Task Executed {threading.current_thread()}') def add_files(self) -> None: files", "ThreadPoolExecutor() as executor: running_tasks = [executor.submit(self.process, torrent) for torrent in torrents] for running_task", "QMessageBox.No) if reply == QMessageBox.Yes: event.accept() else: event.ignore() def process(self, torrent: Torrent) ->", "files: {\", \".join(broken_files)} have occurred.', _type='error') with ThreadPoolExecutor() as executor: running_tasks = [executor.submit(self.process,", "== 'error': text_color = 'red' elif _type == 'message': text_color = 'black' else:", "must be an event! reply = QMessageBox.question(self, 'Message', self.tr('Are you sure to quit?'),", "def process(self, torrent: Torrent) -> None: # self.save_to_database(torrent) print(f'Task Executed {threading.current_thread()}') def add_files(self)", "= QMessageBox.question(self, 'Message', self.tr('Are you sure to quit?'), QMessageBox.Yes, QMessageBox.No) if reply ==", "sure to quit?'), QMessageBox.Yes, QMessageBox.No) if reply == QMessageBox.Yes: event.accept() else: event.ignore() def", "because must be an event! reply = QMessageBox.question(self, 'Message', self.tr('Are you sure to", "have occurred.', _type='error') with ThreadPoolExecutor() as executor: running_tasks = [executor.submit(self.process, torrent) for torrent" ]
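# Hypothetical entry point, not part of the fragment above; the module path
# ui.gui is taken from the filename comment.
import sys

from PyQt5.QtWidgets import QApplication

from ui.gui import MainFrame

if __name__ == '__main__':
    app = QApplication(sys.argv)
    frame = MainFrame()
    frame.show()
    sys.exit(app.exec_())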
[ "add a user to a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/create_group_membership.json\") @classmethod def create(cls, user_ref,", "body for adding users to groups. :param user_ref: Custom user reference :param group_ref:", "= [\"authority\", \"authority_provided_id\"] class CreateGroupMembership(JSONAPIData): \"\"\"The data to add a user to a", "groups. :param user_ref: Custom user reference :param group_ref: Custom group reference :return: \"\"\"", "which is a member of this group. :return: A value object with `id`", "which item to update.\"\"\" return self.meta[\"query\"] class UpsertUser(UpsertBody): \"\"\"The data to upsert a", "query = {field: attributes.pop(field, None) for field in cls.query_fields} return super().create( data_type=cls.data_type, attributes=attributes,", "value object with `id` and `ref` properties. \"\"\" return _IdRef(self.relationships[\"group\"][\"data\"][\"id\"]) class _IdRef: \"\"\"A", "upsert a user.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_user.json\") data_type = DataType.USER query_fields = [\"authority\", \"username\"]", "@property def group(self): \"\"\"The group which this user is a member of. :return:", "to update.\"\"\" return self.meta[\"query\"] class UpsertUser(UpsertBody): \"\"\"The data to upsert a user.\"\"\" validator", "upsert a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_group.json\") data_type = DataType.GROUP query_fields = [\"authority\", \"authority_provided_id\"]", "def member(self): \"\"\"The user which is a member of this group. :return: A", "relationships={ \"member\": { \"data\": {\"type\": DataType.USER.value, \"id\": {\"$ref\": user_ref}} }, \"group\": { \"data\":", "data_type = None query_fields = [] @classmethod def create(cls, attributes, id_reference): query =", "attributes, id_reference): query = {field: attributes.pop(field, None) for field in cls.query_fields} return super().create(", "def group(self): \"\"\"The group which this user is a member of. :return: A", "class _IdRef: \"\"\"A value object which represents an id reference or concrete id.\"\"\"", "id_reference): query = {field: attributes.pop(field, None) for field in cls.query_fields} return super().create( data_type=cls.data_type,", "import DataType from h.h_api.model.json_api import JSONAPIData from h.h_api.schema import Schema class UpsertBody(JSONAPIData): data_type", "A value object with `id` and `ref` properties. 
\"\"\" return _IdRef(self.relationships[\"member\"][\"data\"][\"id\"]) @property def", "class UpsertUser(UpsertBody): \"\"\"The data to upsert a user.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_user.json\") data_type =", "def __init__(self, value): if isinstance(value, dict): self.id, self.ref = None, value.get(\"$ref\") else: self.id,", "user reference :param group_ref: Custom group reference :return: \"\"\" return super().create( DataType.GROUP_MEMBERSHIP, relationships={", "a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/create_group_membership.json\") @classmethod def create(cls, user_ref, group_ref): \"\"\" Create a", "= Schema.get_validator(\"bulk_api/command/upsert_user.json\") data_type = DataType.USER query_fields = [\"authority\", \"username\"] class UpsertGroup(UpsertBody): \"\"\"The data", "super().create( data_type=cls.data_type, attributes=attributes, meta={\"query\": query}, id_reference=id_reference, ) @property def query(self): \"\"\"The query used", "Schema class UpsertBody(JSONAPIData): data_type = None query_fields = [] @classmethod def create(cls, attributes,", "validator = Schema.get_validator(\"bulk_api/command/upsert_user.json\") data_type = DataType.USER query_fields = [\"authority\", \"username\"] class UpsertGroup(UpsertBody): \"\"\"The", "[\"authority\", \"authority_provided_id\"] class CreateGroupMembership(JSONAPIData): \"\"\"The data to add a user to a group.\"\"\"", "Schema.get_validator(\"bulk_api/command/upsert_user.json\") data_type = DataType.USER query_fields = [\"authority\", \"username\"] class UpsertGroup(UpsertBody): \"\"\"The data to", "group_ref): \"\"\" Create a create group membership body for adding users to groups.", "is a member of this group. :return: A value object with `id` and", "with `id` and `ref` properties. \"\"\" return _IdRef(self.relationships[\"group\"][\"data\"][\"id\"]) class _IdRef: \"\"\"A value object", "which represents an id reference or concrete id.\"\"\" def __init__(self, value): if isinstance(value,", "data to upsert a user.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_user.json\") data_type = DataType.USER query_fields =", "attributes.pop(field, None) for field in cls.query_fields} return super().create( data_type=cls.data_type, attributes=attributes, meta={\"query\": query}, id_reference=id_reference,", "a user.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_user.json\") data_type = DataType.USER query_fields = [\"authority\", \"username\"] class", "\"\"\"The group which this user is a member of. 
:return: A value object", "validator = Schema.get_validator(\"bulk_api/command/create_group_membership.json\") @classmethod def create(cls, user_ref, group_ref): \"\"\" Create a create group", "None) for field in cls.query_fields} return super().create( data_type=cls.data_type, attributes=attributes, meta={\"query\": query}, id_reference=id_reference, )", "\"\"\"The query used to select which item to update.\"\"\" return self.meta[\"query\"] class UpsertUser(UpsertBody):", "[\"authority\", \"username\"] class UpsertGroup(UpsertBody): \"\"\"The data to upsert a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_group.json\")", "h.h_api.enums import DataType from h.h_api.model.json_api import JSONAPIData from h.h_api.schema import Schema class UpsertBody(JSONAPIData):", "return super().create( DataType.GROUP_MEMBERSHIP, relationships={ \"member\": { \"data\": {\"type\": DataType.USER.value, \"id\": {\"$ref\": user_ref}} },", "from h.h_api.schema import Schema class UpsertBody(JSONAPIData): data_type = None query_fields = [] @classmethod", "[] @classmethod def create(cls, attributes, id_reference): query = {field: attributes.pop(field, None) for field", ":param user_ref: Custom user reference :param group_ref: Custom group reference :return: \"\"\" return", ":param group_ref: Custom group reference :return: \"\"\" return super().create( DataType.GROUP_MEMBERSHIP, relationships={ \"member\": {", "`id` and `ref` properties. \"\"\" return _IdRef(self.relationships[\"group\"][\"data\"][\"id\"]) class _IdRef: \"\"\"A value object which", "}, }, ) @property def member(self): \"\"\"The user which is a member of", ":return: \"\"\" return super().create( DataType.GROUP_MEMBERSHIP, relationships={ \"member\": { \"data\": {\"type\": DataType.USER.value, \"id\": {\"$ref\":", "A value object with `id` and `ref` properties. \"\"\" return _IdRef(self.relationships[\"group\"][\"data\"][\"id\"]) class _IdRef:", "\"\"\" return _IdRef(self.relationships[\"member\"][\"data\"][\"id\"]) @property def group(self): \"\"\"The group which this user is a", "UpsertUser(UpsertBody): \"\"\"The data to upsert a user.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_user.json\") data_type = DataType.USER", "value object with `id` and `ref` properties. \"\"\" return _IdRef(self.relationships[\"member\"][\"data\"][\"id\"]) @property def group(self):", "{ \"data\": {\"type\": DataType.USER.value, \"id\": {\"$ref\": user_ref}} }, \"group\": { \"data\": {\"type\": DataType.GROUP.value,", "\"\"\"The data to upsert a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_group.json\") data_type = DataType.GROUP query_fields", "self.meta[\"query\"] class UpsertUser(UpsertBody): \"\"\"The data to upsert a user.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_user.json\") data_type", "return _IdRef(self.relationships[\"member\"][\"data\"][\"id\"]) @property def group(self): \"\"\"The group which this user is a member", "\"id\": {\"$ref\": user_ref}} }, \"group\": { \"data\": {\"type\": DataType.GROUP.value, \"id\": {\"$ref\": group_ref}} },", "\"authority_provided_id\"] class CreateGroupMembership(JSONAPIData): \"\"\"The data to add a user to a group.\"\"\" validator", "group(self): \"\"\"The group which this user is a member of. :return: A value", "select which item to update.\"\"\" return self.meta[\"query\"] class UpsertUser(UpsertBody): \"\"\"The data to upsert", "group. :return: A value object with `id` and `ref` properties. 
\"\"\" return _IdRef(self.relationships[\"member\"][\"data\"][\"id\"])", "= DataType.GROUP query_fields = [\"authority\", \"authority_provided_id\"] class CreateGroupMembership(JSONAPIData): \"\"\"The data to add a", "CreateGroupMembership(JSONAPIData): \"\"\"The data to add a user to a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/create_group_membership.json\")", "}, ) @property def member(self): \"\"\"The user which is a member of this", "an id reference or concrete id.\"\"\" def __init__(self, value): if isinstance(value, dict): self.id,", "Schema.get_validator(\"bulk_api/command/upsert_group.json\") data_type = DataType.GROUP query_fields = [\"authority\", \"authority_provided_id\"] class CreateGroupMembership(JSONAPIData): \"\"\"The data to", "UpsertGroup(UpsertBody): \"\"\"The data to upsert a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_group.json\") data_type = DataType.GROUP", "user_ref: Custom user reference :param group_ref: Custom group reference :return: \"\"\" return super().create(", "to upsert a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_group.json\") data_type = DataType.GROUP query_fields = [\"authority\",", "= DataType.USER query_fields = [\"authority\", \"username\"] class UpsertGroup(UpsertBody): \"\"\"The data to upsert a", "class CreateGroupMembership(JSONAPIData): \"\"\"The data to add a user to a group.\"\"\" validator =", "payloads.\"\"\" from h.h_api.enums import DataType from h.h_api.model.json_api import JSONAPIData from h.h_api.schema import Schema", "DataType.GROUP.value, \"id\": {\"$ref\": group_ref}} }, }, ) @property def member(self): \"\"\"The user which", "query(self): \"\"\"The query used to select which item to update.\"\"\" return self.meta[\"query\"] class", "data to upsert a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_group.json\") data_type = DataType.GROUP query_fields =", "data to add a user to a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/create_group_membership.json\") @classmethod def", "}, \"group\": { \"data\": {\"type\": DataType.GROUP.value, \"id\": {\"$ref\": group_ref}} }, }, ) @property", "\"\"\" Create a create group membership body for adding users to groups. :param", "data_type=cls.data_type, attributes=attributes, meta={\"query\": query}, id_reference=id_reference, ) @property def query(self): \"\"\"The query used to", "group membership body for adding users to groups. :param user_ref: Custom user reference", "UpsertBody(JSONAPIData): data_type = None query_fields = [] @classmethod def create(cls, attributes, id_reference): query", "\"\"\"The user which is a member of this group. :return: A value object", "_IdRef: \"\"\"A value object which represents an id reference or concrete id.\"\"\" def", "return self.meta[\"query\"] class UpsertUser(UpsertBody): \"\"\"The data to upsert a user.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_user.json\")", "object with `id` and `ref` properties. 
\"\"\" return _IdRef(self.relationships[\"member\"][\"data\"][\"id\"]) @property def group(self): \"\"\"The", "value): if isinstance(value, dict): self.id, self.ref = None, value.get(\"$ref\") else: self.id, self.ref =", "import Schema class UpsertBody(JSONAPIData): data_type = None query_fields = [] @classmethod def create(cls,", "reference :return: \"\"\" return super().create( DataType.GROUP_MEMBERSHIP, relationships={ \"member\": { \"data\": {\"type\": DataType.USER.value, \"id\":", "\"data\": {\"type\": DataType.GROUP.value, \"id\": {\"$ref\": group_ref}} }, }, ) @property def member(self): \"\"\"The", "{ \"data\": {\"type\": DataType.GROUP.value, \"id\": {\"$ref\": group_ref}} }, }, ) @property def member(self):", "None query_fields = [] @classmethod def create(cls, attributes, id_reference): query = {field: attributes.pop(field,", "create(cls, user_ref, group_ref): \"\"\" Create a create group membership body for adding users", "DataType.USER query_fields = [\"authority\", \"username\"] class UpsertGroup(UpsertBody): \"\"\"The data to upsert a group.\"\"\"", "= [] @classmethod def create(cls, attributes, id_reference): query = {field: attributes.pop(field, None) for", "`ref` properties. \"\"\" return _IdRef(self.relationships[\"group\"][\"data\"][\"id\"]) class _IdRef: \"\"\"A value object which represents an", "@classmethod def create(cls, attributes, id_reference): query = {field: attributes.pop(field, None) for field in", "return _IdRef(self.relationships[\"group\"][\"data\"][\"id\"]) class _IdRef: \"\"\"A value object which represents an id reference or", "user_ref, group_ref): \"\"\" Create a create group membership body for adding users to", "{field: attributes.pop(field, None) for field in cls.query_fields} return super().create( data_type=cls.data_type, attributes=attributes, meta={\"query\": query},", "for field in cls.query_fields} return super().create( data_type=cls.data_type, attributes=attributes, meta={\"query\": query}, id_reference=id_reference, ) @property", "group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/create_group_membership.json\") @classmethod def create(cls, user_ref, group_ref): \"\"\" Create a create", "\"id\": {\"$ref\": group_ref}} }, }, ) @property def member(self): \"\"\"The user which is", "cls.query_fields} return super().create( data_type=cls.data_type, attributes=attributes, meta={\"query\": query}, id_reference=id_reference, ) @property def query(self): \"\"\"The", "in cls.query_fields} return super().create( data_type=cls.data_type, attributes=attributes, meta={\"query\": query}, id_reference=id_reference, ) @property def query(self):", "query used to select which item to update.\"\"\" return self.meta[\"query\"] class UpsertUser(UpsertBody): \"\"\"The", "\"username\"] class UpsertGroup(UpsertBody): \"\"\"The data to upsert a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_group.json\") data_type", "def create(cls, user_ref, group_ref): \"\"\" Create a create group membership body for adding", "{\"type\": DataType.GROUP.value, \"id\": {\"$ref\": group_ref}} }, }, ) @property def member(self): \"\"\"The user", "to select which item to update.\"\"\" return self.meta[\"query\"] class UpsertUser(UpsertBody): \"\"\"The data to", "_IdRef(self.relationships[\"group\"][\"data\"][\"id\"]) class _IdRef: \"\"\"A value object which represents an id reference or concrete", "isinstance(value, dict): self.id, self.ref = None, value.get(\"$ref\") else: self.id, self.ref = value, None", "to add a user to a group.\"\"\" 
validator = Schema.get_validator(\"bulk_api/command/create_group_membership.json\") @classmethod def create(cls,", "of. :return: A value object with `id` and `ref` properties. \"\"\" return _IdRef(self.relationships[\"group\"][\"data\"][\"id\"])", "group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_group.json\") data_type = DataType.GROUP query_fields = [\"authority\", \"authority_provided_id\"] class CreateGroupMembership(JSONAPIData):", "{\"$ref\": group_ref}} }, }, ) @property def member(self): \"\"\"The user which is a", "\"\"\"The data to upsert a user.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_user.json\") data_type = DataType.USER query_fields", "properties. \"\"\" return _IdRef(self.relationships[\"group\"][\"data\"][\"id\"]) class _IdRef: \"\"\"A value object which represents an id", "member of. :return: A value object with `id` and `ref` properties. \"\"\" return", "h.h_api.schema import Schema class UpsertBody(JSONAPIData): data_type = None query_fields = [] @classmethod def", "user_ref}} }, \"group\": { \"data\": {\"type\": DataType.GROUP.value, \"id\": {\"$ref\": group_ref}} }, }, )", "with `id` and `ref` properties. \"\"\" return _IdRef(self.relationships[\"member\"][\"data\"][\"id\"]) @property def group(self): \"\"\"The group", "\"\"\"Models representing the data modifying payloads.\"\"\" from h.h_api.enums import DataType from h.h_api.model.json_api import", "\"\"\"The data to add a user to a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/create_group_membership.json\") @classmethod", "query}, id_reference=id_reference, ) @property def query(self): \"\"\"The query used to select which item", "a member of this group. :return: A value object with `id` and `ref`", "to upsert a user.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_user.json\") data_type = DataType.USER query_fields = [\"authority\",", "JSONAPIData from h.h_api.schema import Schema class UpsertBody(JSONAPIData): data_type = None query_fields = []", "validator = Schema.get_validator(\"bulk_api/command/upsert_group.json\") data_type = DataType.GROUP query_fields = [\"authority\", \"authority_provided_id\"] class CreateGroupMembership(JSONAPIData): \"\"\"The", "item to update.\"\"\" return self.meta[\"query\"] class UpsertUser(UpsertBody): \"\"\"The data to upsert a user.\"\"\"", ") @property def query(self): \"\"\"The query used to select which item to update.\"\"\"", "user which is a member of this group. :return: A value object with", "is a member of. :return: A value object with `id` and `ref` properties.", "Create a create group membership body for adding users to groups. :param user_ref:", "and `ref` properties. \"\"\" return _IdRef(self.relationships[\"member\"][\"data\"][\"id\"]) @property def group(self): \"\"\"The group which this", "@property def query(self): \"\"\"The query used to select which item to update.\"\"\" return", "create group membership body for adding users to groups. :param user_ref: Custom user", "represents an id reference or concrete id.\"\"\" def __init__(self, value): if isinstance(value, dict):", "member of this group. :return: A value object with `id` and `ref` properties.", "@property def member(self): \"\"\"The user which is a member of this group. :return:", "\"group\": { \"data\": {\"type\": DataType.GROUP.value, \"id\": {\"$ref\": group_ref}} }, }, ) @property def", "object which represents an id reference or concrete id.\"\"\" def __init__(self, value): if", "properties. 
\"\"\" return _IdRef(self.relationships[\"member\"][\"data\"][\"id\"]) @property def group(self): \"\"\"The group which this user is", "the data modifying payloads.\"\"\" from h.h_api.enums import DataType from h.h_api.model.json_api import JSONAPIData from", "group_ref}} }, }, ) @property def member(self): \"\"\"The user which is a member", "= {field: attributes.pop(field, None) for field in cls.query_fields} return super().create( data_type=cls.data_type, attributes=attributes, meta={\"query\":", "_IdRef(self.relationships[\"member\"][\"data\"][\"id\"]) @property def group(self): \"\"\"The group which this user is a member of.", "group which this user is a member of. :return: A value object with", "data modifying payloads.\"\"\" from h.h_api.enums import DataType from h.h_api.model.json_api import JSONAPIData from h.h_api.schema", "representing the data modifying payloads.\"\"\" from h.h_api.enums import DataType from h.h_api.model.json_api import JSONAPIData", "Custom group reference :return: \"\"\" return super().create( DataType.GROUP_MEMBERSHIP, relationships={ \"member\": { \"data\": {\"type\":", "`id` and `ref` properties. \"\"\" return _IdRef(self.relationships[\"member\"][\"data\"][\"id\"]) @property def group(self): \"\"\"The group which", "return super().create( data_type=cls.data_type, attributes=attributes, meta={\"query\": query}, id_reference=id_reference, ) @property def query(self): \"\"\"The query", "\"member\": { \"data\": {\"type\": DataType.USER.value, \"id\": {\"$ref\": user_ref}} }, \"group\": { \"data\": {\"type\":", "\"data\": {\"type\": DataType.USER.value, \"id\": {\"$ref\": user_ref}} }, \"group\": { \"data\": {\"type\": DataType.GROUP.value, \"id\":", "member(self): \"\"\"The user which is a member of this group. :return: A value", "`ref` properties. \"\"\" return _IdRef(self.relationships[\"member\"][\"data\"][\"id\"]) @property def group(self): \"\"\"The group which this user", "id_reference=id_reference, ) @property def query(self): \"\"\"The query used to select which item to", "\"\"\" return _IdRef(self.relationships[\"group\"][\"data\"][\"id\"]) class _IdRef: \"\"\"A value object which represents an id reference", "\"\"\"A value object which represents an id reference or concrete id.\"\"\" def __init__(self,", "id reference or concrete id.\"\"\" def __init__(self, value): if isinstance(value, dict): self.id, self.ref", "attributes=attributes, meta={\"query\": query}, id_reference=id_reference, ) @property def query(self): \"\"\"The query used to select", "query_fields = [\"authority\", \"username\"] class UpsertGroup(UpsertBody): \"\"\"The data to upsert a group.\"\"\" validator", "Schema.get_validator(\"bulk_api/command/create_group_membership.json\") @classmethod def create(cls, user_ref, group_ref): \"\"\" Create a create group membership body", "\"\"\" return super().create( DataType.GROUP_MEMBERSHIP, relationships={ \"member\": { \"data\": {\"type\": DataType.USER.value, \"id\": {\"$ref\": user_ref}}", "a member of. :return: A value object with `id` and `ref` properties. \"\"\"", "this user is a member of. 
:return: A value object with `id` and", "from h.h_api.model.json_api import JSONAPIData from h.h_api.schema import Schema class UpsertBody(JSONAPIData): data_type = None", "id.\"\"\" def __init__(self, value): if isinstance(value, dict): self.id, self.ref = None, value.get(\"$ref\") else:", "group_ref: Custom group reference :return: \"\"\" return super().create( DataType.GROUP_MEMBERSHIP, relationships={ \"member\": { \"data\":", "a create group membership body for adding users to groups. :param user_ref: Custom", "query_fields = [\"authority\", \"authority_provided_id\"] class CreateGroupMembership(JSONAPIData): \"\"\"The data to add a user to", "= Schema.get_validator(\"bulk_api/command/create_group_membership.json\") @classmethod def create(cls, user_ref, group_ref): \"\"\" Create a create group membership", "of this group. :return: A value object with `id` and `ref` properties. \"\"\"", "reference or concrete id.\"\"\" def __init__(self, value): if isinstance(value, dict): self.id, self.ref =", "Custom user reference :param group_ref: Custom group reference :return: \"\"\" return super().create( DataType.GROUP_MEMBERSHIP,", "create(cls, attributes, id_reference): query = {field: attributes.pop(field, None) for field in cls.query_fields} return", "for adding users to groups. :param user_ref: Custom user reference :param group_ref: Custom", "__init__(self, value): if isinstance(value, dict): self.id, self.ref = None, value.get(\"$ref\") else: self.id, self.ref", "user is a member of. :return: A value object with `id` and `ref`", "class UpsertBody(JSONAPIData): data_type = None query_fields = [] @classmethod def create(cls, attributes, id_reference):", "update.\"\"\" return self.meta[\"query\"] class UpsertUser(UpsertBody): \"\"\"The data to upsert a user.\"\"\" validator =", "DataType.GROUP query_fields = [\"authority\", \"authority_provided_id\"] class CreateGroupMembership(JSONAPIData): \"\"\"The data to add a user", "query_fields = [] @classmethod def create(cls, attributes, id_reference): query = {field: attributes.pop(field, None)", "super().create( DataType.GROUP_MEMBERSHIP, relationships={ \"member\": { \"data\": {\"type\": DataType.USER.value, \"id\": {\"$ref\": user_ref}} }, \"group\":", "= [\"authority\", \"username\"] class UpsertGroup(UpsertBody): \"\"\"The data to upsert a group.\"\"\" validator =", "{\"$ref\": user_ref}} }, \"group\": { \"data\": {\"type\": DataType.GROUP.value, \"id\": {\"$ref\": group_ref}} }, },", "if isinstance(value, dict): self.id, self.ref = None, value.get(\"$ref\") else: self.id, self.ref = value,", "import JSONAPIData from h.h_api.schema import Schema class UpsertBody(JSONAPIData): data_type = None query_fields =", "= None query_fields = [] @classmethod def create(cls, attributes, id_reference): query = {field:", "used to select which item to update.\"\"\" return self.meta[\"query\"] class UpsertUser(UpsertBody): \"\"\"The data", "h.h_api.model.json_api import JSONAPIData from h.h_api.schema import Schema class UpsertBody(JSONAPIData): data_type = None query_fields", "DataType.USER.value, \"id\": {\"$ref\": user_ref}} }, \"group\": { \"data\": {\"type\": DataType.GROUP.value, \"id\": {\"$ref\": group_ref}}", "reference :param group_ref: Custom group reference :return: \"\"\" return super().create( DataType.GROUP_MEMBERSHIP, relationships={ \"member\":", "def query(self): \"\"\"The query used to select which item to update.\"\"\" return self.meta[\"query\"]", "or concrete id.\"\"\" def __init__(self, value): if isinstance(value, dict): self.id, 
self.ref = None,", "user.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_user.json\") data_type = DataType.USER query_fields = [\"authority\", \"username\"] class UpsertGroup(UpsertBody):", "a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_group.json\") data_type = DataType.GROUP query_fields = [\"authority\", \"authority_provided_id\"] class", "@classmethod def create(cls, user_ref, group_ref): \"\"\" Create a create group membership body for", "users to groups. :param user_ref: Custom user reference :param group_ref: Custom group reference", "DataType from h.h_api.model.json_api import JSONAPIData from h.h_api.schema import Schema class UpsertBody(JSONAPIData): data_type =", "data_type = DataType.GROUP query_fields = [\"authority\", \"authority_provided_id\"] class CreateGroupMembership(JSONAPIData): \"\"\"The data to add", "adding users to groups. :param user_ref: Custom user reference :param group_ref: Custom group", "which this user is a member of. :return: A value object with `id`", "user to a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/create_group_membership.json\") @classmethod def create(cls, user_ref, group_ref): \"\"\"", "def create(cls, attributes, id_reference): query = {field: attributes.pop(field, None) for field in cls.query_fields}", "DataType.GROUP_MEMBERSHIP, relationships={ \"member\": { \"data\": {\"type\": DataType.USER.value, \"id\": {\"$ref\": user_ref}} }, \"group\": {", "and `ref` properties. \"\"\" return _IdRef(self.relationships[\"group\"][\"data\"][\"id\"]) class _IdRef: \"\"\"A value object which represents", "concrete id.\"\"\" def __init__(self, value): if isinstance(value, dict): self.id, self.ref = None, value.get(\"$ref\")", "membership body for adding users to groups. :param user_ref: Custom user reference :param", "to a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/create_group_membership.json\") @classmethod def create(cls, user_ref, group_ref): \"\"\" Create", "data_type = DataType.USER query_fields = [\"authority\", \"username\"] class UpsertGroup(UpsertBody): \"\"\"The data to upsert", ") @property def member(self): \"\"\"The user which is a member of this group.", "object with `id` and `ref` properties. \"\"\" return _IdRef(self.relationships[\"group\"][\"data\"][\"id\"]) class _IdRef: \"\"\"A value", "{\"type\": DataType.USER.value, \"id\": {\"$ref\": user_ref}} }, \"group\": { \"data\": {\"type\": DataType.GROUP.value, \"id\": {\"$ref\":", ":return: A value object with `id` and `ref` properties. \"\"\" return _IdRef(self.relationships[\"member\"][\"data\"][\"id\"]) @property", "a user to a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/create_group_membership.json\") @classmethod def create(cls, user_ref, group_ref):", "this group. :return: A value object with `id` and `ref` properties. 
\"\"\" return", "value object which represents an id reference or concrete id.\"\"\" def __init__(self, value):", "modifying payloads.\"\"\" from h.h_api.enums import DataType from h.h_api.model.json_api import JSONAPIData from h.h_api.schema import", "from h.h_api.enums import DataType from h.h_api.model.json_api import JSONAPIData from h.h_api.schema import Schema class", "field in cls.query_fields} return super().create( data_type=cls.data_type, attributes=attributes, meta={\"query\": query}, id_reference=id_reference, ) @property def", "= Schema.get_validator(\"bulk_api/command/upsert_group.json\") data_type = DataType.GROUP query_fields = [\"authority\", \"authority_provided_id\"] class CreateGroupMembership(JSONAPIData): \"\"\"The data", "group reference :return: \"\"\" return super().create( DataType.GROUP_MEMBERSHIP, relationships={ \"member\": { \"data\": {\"type\": DataType.USER.value,", "class UpsertGroup(UpsertBody): \"\"\"The data to upsert a group.\"\"\" validator = Schema.get_validator(\"bulk_api/command/upsert_group.json\") data_type =", ":return: A value object with `id` and `ref` properties. \"\"\" return _IdRef(self.relationships[\"group\"][\"data\"][\"id\"]) class", "meta={\"query\": query}, id_reference=id_reference, ) @property def query(self): \"\"\"The query used to select which", "to groups. :param user_ref: Custom user reference :param group_ref: Custom group reference :return:" ]
[ "test_user_instance(self): pass def test_post_instance(self): pass def test_comment_instance(self): pass if __name__ == '__main__': unittest.main()", "email='<EMAIL>', password='<PASSWORD>') self.new_post = Post() self.new_comment = Comment() def test_user_instance(self): pass def test_post_instance(self):", "User, Post, Comment class PitchTest(unittest.TestCase): def setUp(self): self.new_user = User(username='Olly', email='<EMAIL>', password='<PASSWORD>') self.new_post", "from app.models import User, Post, Comment class PitchTest(unittest.TestCase): def setUp(self): self.new_user = User(username='Olly',", "class PitchTest(unittest.TestCase): def setUp(self): self.new_user = User(username='Olly', email='<EMAIL>', password='<PASSWORD>') self.new_post = Post() self.new_comment", "Comment class PitchTest(unittest.TestCase): def setUp(self): self.new_user = User(username='Olly', email='<EMAIL>', password='<PASSWORD>') self.new_post = Post()", "self.new_post = Post() self.new_comment = Comment() def test_user_instance(self): pass def test_post_instance(self): pass def", "import User, Post, Comment class PitchTest(unittest.TestCase): def setUp(self): self.new_user = User(username='Olly', email='<EMAIL>', password='<PASSWORD>')", "app.models import User, Post, Comment class PitchTest(unittest.TestCase): def setUp(self): self.new_user = User(username='Olly', email='<EMAIL>',", "Comment() def test_user_instance(self): pass def test_post_instance(self): pass def test_comment_instance(self): pass if __name__ ==", "unittest from app.models import User, Post, Comment class PitchTest(unittest.TestCase): def setUp(self): self.new_user =", "def setUp(self): self.new_user = User(username='Olly', email='<EMAIL>', password='<PASSWORD>') self.new_post = Post() self.new_comment = Comment()", "password='<PASSWORD>') self.new_post = Post() self.new_comment = Comment() def test_user_instance(self): pass def test_post_instance(self): pass", "self.new_comment = Comment() def test_user_instance(self): pass def test_post_instance(self): pass def test_comment_instance(self): pass if", "<gh_stars>0 import unittest from app.models import User, Post, Comment class PitchTest(unittest.TestCase): def setUp(self):", "self.new_user = User(username='Olly', email='<EMAIL>', password='<PASSWORD>') self.new_post = Post() self.new_comment = Comment() def test_user_instance(self):", "= User(username='Olly', email='<EMAIL>', password='<PASSWORD>') self.new_post = Post() self.new_comment = Comment() def test_user_instance(self): pass", "PitchTest(unittest.TestCase): def setUp(self): self.new_user = User(username='Olly', email='<EMAIL>', password='<PASSWORD>') self.new_post = Post() self.new_comment =", "= Comment() def test_user_instance(self): pass def test_post_instance(self): pass def test_comment_instance(self): pass if __name__", "import unittest from app.models import User, Post, Comment class PitchTest(unittest.TestCase): def setUp(self): self.new_user", "= Post() self.new_comment = Comment() def test_user_instance(self): pass def test_post_instance(self): pass def test_comment_instance(self):", "Post, Comment class PitchTest(unittest.TestCase): def setUp(self): self.new_user = User(username='Olly', email='<EMAIL>', password='<PASSWORD>') self.new_post =", "def test_user_instance(self): pass def test_post_instance(self): pass def test_comment_instance(self): pass if __name__ == '__main__':", "setUp(self): self.new_user = User(username='Olly', email='<EMAIL>', password='<PASSWORD>') self.new_post = Post() self.new_comment = 
Comment() def", "Post() self.new_comment = Comment() def test_user_instance(self): pass def test_post_instance(self): pass def test_comment_instance(self): pass", "User(username='Olly', email='<EMAIL>', password='<PASSWORD>') self.new_post = Post() self.new_comment = Comment() def test_user_instance(self): pass def" ]
[ "\"\"\"Construct.\"\"\" pass def transform(self, graphs_list): \"\"\"transform.\"\"\" try: for graphs in graphs_list: transformed_graph =", "nx.Graph() for graph in graphs: graph_global = nx.disjoint_union(graph_global, graph) return graph_global # ------------------------------------------------------------------------------", "\"\"\"Union.\"\"\" def __init__(self, attribute='position'): \"\"\"Construct.\"\"\" self.attribute = attribute def transform(self, graphs_list): \"\"\"transform.\"\"\" try:", "Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) # ------------------------------------------------------------------------------", "iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _union_list(self, graphs_list): for graphs in", "return graph_global # ------------------------------------------------------------------------------ class Union(BaseEstimator, TransformerMixin): \"\"\"Union.\"\"\" def __init__(self, attribute='position'): \"\"\"Construct.\"\"\" self.attribute", "__init__(self): \"\"\"Construct.\"\"\" pass def transform(self, graphs_list): \"\"\"transform.\"\"\" try: for graphs in graphs_list: for", "class Union(BaseEstimator, TransformerMixin): \"\"\"Union.\"\"\" def __init__(self, attribute='position'): \"\"\"Construct.\"\"\" self.attribute = attribute def transform(self,", "in graphs: yield graph except Exception as e: logger.debug('Failed iteration. Reason: %s' %", "for graphs in graphs_list: transformed_graph = self._union(graphs) yield transformed_graph def _union(self, graphs): graph_global", "TransformerMixin): \"\"\"DisjointUnion.\"\"\" def __init__(self): \"\"\"Construct.\"\"\" pass def transform(self, graphs_list): \"\"\"transform.\"\"\" try: for graphs", "= self._disjoint_union(graphs) yield transformed_graph except Exception as e: logger.debug('Failed iteration. Reason: %s' %", "graph except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True)", "in graphs_list: transformed_graph = self._union(graphs) yield transformed_graph def _union(self, graphs): graph_global = nx.Graph()", "TransformerMixin): \"\"\"Union.\"\"\" def __init__(self, attribute='position'): \"\"\"Construct.\"\"\" self.attribute = attribute def transform(self, graphs_list): \"\"\"transform.\"\"\"", "of all graphs graph_global = nx.Graph() for graph in graphs: graph_global = nx.disjoint_union(graph_global,", "\"\"\"Provides ways to join distinct graphs.\"\"\" from GArDen.transform.contraction import Minor from sklearn.base import", "def transform(self, graphs_list): \"\"\"transform.\"\"\" try: minor = Minor() graphs = self._union_list(graphs_list) return minor.transform(graphs)", "logging.getLogger(__name__) # ------------------------------------------------------------------------------ class Flatten(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\" def __init__(self): \"\"\"Construct.\"\"\" pass def transform(self,", "%s' % e) logger.debug('Exception', exc_info=True) # ------------------------------------------------------------------------------ class DisjointUnion(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\" def __init__(self):", "minor = Minor() graphs = self._union_list(graphs_list) return minor.transform(graphs) except Exception as e: logger.debug('Failed", "for graph in graphs: yield graph except Exception as e: logger.debug('Failed iteration. 
Reason:", "exc_info=True) def _disjoint_union(self, graphs): # make the disjoint union of all graphs graph_global", "for graphs in graphs_list: transformed_graph = self._disjoint_union(graphs) yield transformed_graph except Exception as e:", "#!/usr/bin/env python \"\"\"Provides ways to join distinct graphs.\"\"\" from GArDen.transform.contraction import Minor from", "# ------------------------------------------------------------------------------ class Union(BaseEstimator, TransformerMixin): \"\"\"Union.\"\"\" def __init__(self, attribute='position'): \"\"\"Construct.\"\"\" self.attribute = attribute", "graphs.\"\"\" from GArDen.transform.contraction import Minor from sklearn.base import BaseEstimator, TransformerMixin import networkx as", "pass def transform(self, graphs_list): \"\"\"transform.\"\"\" try: for graphs in graphs_list: for graph in", "logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) # ------------------------------------------------------------------------------ class DisjointUnion(BaseEstimator, TransformerMixin):", "iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) # ------------------------------------------------------------------------------ class DisjointUnion(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\"", "from GArDen.transform.contraction import Minor from sklearn.base import BaseEstimator, TransformerMixin import networkx as nx", "graphs: graph_global = nx.disjoint_union(graph_global, graph) return graph_global # ------------------------------------------------------------------------------ class Union(BaseEstimator, TransformerMixin): \"\"\"Union.\"\"\"", "graphs): graph_global = nx.Graph() for graph in graphs: graph_global = nx.disjoint_union(graph_global, graph) for", "e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) # ------------------------------------------------------------------------------ class DisjointUnion(BaseEstimator,", "return minor.transform(graphs) except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception',", "sklearn.base import BaseEstimator, TransformerMixin import networkx as nx import logging logger = logging.getLogger(__name__)", "n in graph_global.nodes(): if self.attribute in graph_global.node[n]: graph_global.node[n]['part_id'] = \\ [graph_global.node[n][self.attribute]] graph_global.node[n]['part_name'] =", "transform(self, graphs_list): \"\"\"transform.\"\"\" try: for graphs in graphs_list: for graph in graphs: yield", "try: for graphs in graphs_list: for graph in graphs: yield graph except Exception", "nx.Graph() for graph in graphs: graph_global = nx.disjoint_union(graph_global, graph) for n in graph_global.nodes():", "_disjoint_union(self, graphs): # make the disjoint union of all graphs graph_global = nx.Graph()", "ways to join distinct graphs.\"\"\" from GArDen.transform.contraction import Minor from sklearn.base import BaseEstimator,", "e) logger.debug('Exception', exc_info=True) def _union_list(self, graphs_list): for graphs in graphs_list: transformed_graph = self._union(graphs)", "e) logger.debug('Exception', exc_info=True) # ------------------------------------------------------------------------------ class DisjointUnion(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\" def __init__(self): \"\"\"Construct.\"\"\" pass", "Exception as e: logger.debug('Failed iteration. 
Reason: %s' % e) logger.debug('Exception', exc_info=True) def _union_list(self,", "graph_global = nx.Graph() for graph in graphs: graph_global = nx.disjoint_union(graph_global, graph) return graph_global", "% e) logger.debug('Exception', exc_info=True) def _disjoint_union(self, graphs): # make the disjoint union of", "graphs_list: for graph in graphs: yield graph except Exception as e: logger.debug('Failed iteration.", "GArDen.transform.contraction import Minor from sklearn.base import BaseEstimator, TransformerMixin import networkx as nx import", "% e) logger.debug('Exception', exc_info=True) def _union_list(self, graphs_list): for graphs in graphs_list: transformed_graph =", "\"\"\"Construct.\"\"\" pass def transform(self, graphs_list): \"\"\"transform.\"\"\" try: for graphs in graphs_list: for graph", "graph in graphs: graph_global = nx.disjoint_union(graph_global, graph) return graph_global # ------------------------------------------------------------------------------ class Union(BaseEstimator,", "\"\"\"transform.\"\"\" try: minor = Minor() graphs = self._union_list(graphs_list) return minor.transform(graphs) except Exception as", "# ------------------------------------------------------------------------------ class Flatten(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\" def __init__(self): \"\"\"Construct.\"\"\" pass def transform(self, graphs_list):", "attribute def transform(self, graphs_list): \"\"\"transform.\"\"\" try: minor = Minor() graphs = self._union_list(graphs_list) return", "graph_global = nx.disjoint_union(graph_global, graph) for n in graph_global.nodes(): if self.attribute in graph_global.node[n]: graph_global.node[n]['part_id']", "graphs in graphs_list: transformed_graph = self._disjoint_union(graphs) yield transformed_graph except Exception as e: logger.debug('Failed", "def transform(self, graphs_list): \"\"\"transform.\"\"\" try: for graphs in graphs_list: for graph in graphs:", "the disjoint union of all graphs graph_global = nx.Graph() for graph in graphs:", "transform(self, graphs_list): \"\"\"transform.\"\"\" try: minor = Minor() graphs = self._union_list(graphs_list) return minor.transform(graphs) except", "Reason: %s' % e) logger.debug('Exception', exc_info=True) def _disjoint_union(self, graphs): # make the disjoint", "graph in graphs: graph_global = nx.disjoint_union(graph_global, graph) for n in graph_global.nodes(): if self.attribute", "graphs graph_global = nx.Graph() for graph in graphs: graph_global = nx.disjoint_union(graph_global, graph) return", "for n in graph_global.nodes(): if self.attribute in graph_global.node[n]: graph_global.node[n]['part_id'] = \\ [graph_global.node[n][self.attribute]] graph_global.node[n]['part_name']", "logger.debug('Exception', exc_info=True) def _disjoint_union(self, graphs): # make the disjoint union of all graphs", "distinct graphs.\"\"\" from GArDen.transform.contraction import Minor from sklearn.base import BaseEstimator, TransformerMixin import networkx", "as nx import logging logger = logging.getLogger(__name__) # ------------------------------------------------------------------------------ class Flatten(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\"", "self._union(graphs) yield transformed_graph def _union(self, graphs): graph_global = nx.Graph() for graph in graphs:", "def __init__(self): \"\"\"Construct.\"\"\" pass def transform(self, graphs_list): \"\"\"transform.\"\"\" try: for graphs in graphs_list:", "% e) logger.debug('Exception', exc_info=True) # 
------------------------------------------------------------------------------ class DisjointUnion(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\" def __init__(self): \"\"\"Construct.\"\"\"", "graph) for n in graph_global.nodes(): if self.attribute in graph_global.node[n]: graph_global.node[n]['part_id'] = \\ [graph_global.node[n][self.attribute]]", "as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _disjoint_union(self, graphs):", "Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _disjoint_union(self,", "= self._union(graphs) yield transformed_graph def _union(self, graphs): graph_global = nx.Graph() for graph in", "nx.disjoint_union(graph_global, graph) for n in graph_global.nodes(): if self.attribute in graph_global.node[n]: graph_global.node[n]['part_id'] = \\", "union of all graphs graph_global = nx.Graph() for graph in graphs: graph_global =", "\"\"\"transform.\"\"\" try: for graphs in graphs_list: for graph in graphs: yield graph except", "yield transformed_graph def _union(self, graphs): graph_global = nx.Graph() for graph in graphs: graph_global", "------------------------------------------------------------------------------ class Union(BaseEstimator, TransformerMixin): \"\"\"Union.\"\"\" def __init__(self, attribute='position'): \"\"\"Construct.\"\"\" self.attribute = attribute def", "graphs_list: transformed_graph = self._union(graphs) yield transformed_graph def _union(self, graphs): graph_global = nx.Graph() for", "nx import logging logger = logging.getLogger(__name__) # ------------------------------------------------------------------------------ class Flatten(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\" def", "= attribute def transform(self, graphs_list): \"\"\"transform.\"\"\" try: minor = Minor() graphs = self._union_list(graphs_list)", "_union_list(self, graphs_list): for graphs in graphs_list: transformed_graph = self._union(graphs) yield transformed_graph def _union(self,", "attribute='position'): \"\"\"Construct.\"\"\" self.attribute = attribute def transform(self, graphs_list): \"\"\"transform.\"\"\" try: minor = Minor()", "self._union_list(graphs_list) return minor.transform(graphs) except Exception as e: logger.debug('Failed iteration. Reason: %s' % e)", "def transform(self, graphs_list): \"\"\"transform.\"\"\" try: for graphs in graphs_list: transformed_graph = self._disjoint_union(graphs) yield", "join distinct graphs.\"\"\" from GArDen.transform.contraction import Minor from sklearn.base import BaseEstimator, TransformerMixin import", "graphs in graphs_list: for graph in graphs: yield graph except Exception as e:", "DisjointUnion(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\" def __init__(self): \"\"\"Construct.\"\"\" pass def transform(self, graphs_list): \"\"\"transform.\"\"\" try: for", "iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _disjoint_union(self, graphs): # make the", "graphs in graphs_list: transformed_graph = self._union(graphs) yield transformed_graph def _union(self, graphs): graph_global =", "for graph in graphs: graph_global = nx.disjoint_union(graph_global, graph) return graph_global # ------------------------------------------------------------------------------ class", "graphs = self._union_list(graphs_list) return minor.transform(graphs) except Exception as e: logger.debug('Failed iteration. 
Reason: %s'", "%s' % e) logger.debug('Exception', exc_info=True) def _union_list(self, graphs_list): for graphs in graphs_list: transformed_graph", "graphs: graph_global = nx.disjoint_union(graph_global, graph) for n in graph_global.nodes(): if self.attribute in graph_global.node[n]:", "minor.transform(graphs) except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True)", "class DisjointUnion(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\" def __init__(self): \"\"\"Construct.\"\"\" pass def transform(self, graphs_list): \"\"\"transform.\"\"\" try:", "logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _union_list(self, graphs_list): for graphs", "graph_global = nx.Graph() for graph in graphs: graph_global = nx.disjoint_union(graph_global, graph) for n", "Reason: %s' % e) logger.debug('Exception', exc_info=True) # ------------------------------------------------------------------------------ class DisjointUnion(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\" def", "\"\"\"DisjointUnion.\"\"\" def __init__(self): \"\"\"Construct.\"\"\" pass def transform(self, graphs_list): \"\"\"transform.\"\"\" try: for graphs in", "e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _union_list(self, graphs_list): for", "exc_info=True) def _union_list(self, graphs_list): for graphs in graphs_list: transformed_graph = self._union(graphs) yield transformed_graph", "------------------------------------------------------------------------------ class DisjointUnion(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\" def __init__(self): \"\"\"Construct.\"\"\" pass def transform(self, graphs_list): \"\"\"transform.\"\"\"", "if self.attribute in graph_global.node[n]: graph_global.node[n]['part_id'] = \\ [graph_global.node[n][self.attribute]] graph_global.node[n]['part_name'] = \\ [graph_global.node[n]['label']] return", "logger.debug('Exception', exc_info=True) def _union_list(self, graphs_list): for graphs in graphs_list: transformed_graph = self._union(graphs) yield", "%s' % e) logger.debug('Exception', exc_info=True) def _disjoint_union(self, graphs): # make the disjoint union", "nx.disjoint_union(graph_global, graph) return graph_global # ------------------------------------------------------------------------------ class Union(BaseEstimator, TransformerMixin): \"\"\"Union.\"\"\" def __init__(self, attribute='position'):", "python \"\"\"Provides ways to join distinct graphs.\"\"\" from GArDen.transform.contraction import Minor from sklearn.base", "to join distinct graphs.\"\"\" from GArDen.transform.contraction import Minor from sklearn.base import BaseEstimator, TransformerMixin", "= self._union_list(graphs_list) return minor.transform(graphs) except Exception as e: logger.debug('Failed iteration. 
Reason: %s' %", "import BaseEstimator, TransformerMixin import networkx as nx import logging logger = logging.getLogger(__name__) #", "# make the disjoint union of all graphs graph_global = nx.Graph() for graph", "in graphs: graph_global = nx.disjoint_union(graph_global, graph) for n in graph_global.nodes(): if self.attribute in", "# ------------------------------------------------------------------------------ class DisjointUnion(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\" def __init__(self): \"\"\"Construct.\"\"\" pass def transform(self, graphs_list):", "class Flatten(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\" def __init__(self): \"\"\"Construct.\"\"\" pass def transform(self, graphs_list): \"\"\"transform.\"\"\" try:", "transformed_graph = self._disjoint_union(graphs) yield transformed_graph except Exception as e: logger.debug('Failed iteration. Reason: %s'", "yield transformed_graph except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception',", "__init__(self, attribute='position'): \"\"\"Construct.\"\"\" self.attribute = attribute def transform(self, graphs_list): \"\"\"transform.\"\"\" try: minor =", "as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) # ------------------------------------------------------------------------------ class", "graphs_list): \"\"\"transform.\"\"\" try: for graphs in graphs_list: transformed_graph = self._disjoint_union(graphs) yield transformed_graph except", "try: for graphs in graphs_list: transformed_graph = self._disjoint_union(graphs) yield transformed_graph except Exception as", "Flatten(BaseEstimator, TransformerMixin): \"\"\"DisjointUnion.\"\"\" def __init__(self): \"\"\"Construct.\"\"\" pass def transform(self, graphs_list): \"\"\"transform.\"\"\" try: for", "e) logger.debug('Exception', exc_info=True) def _disjoint_union(self, graphs): # make the disjoint union of all", "make the disjoint union of all graphs graph_global = nx.Graph() for graph in", "disjoint union of all graphs graph_global = nx.Graph() for graph in graphs: graph_global", "= nx.Graph() for graph in graphs: graph_global = nx.disjoint_union(graph_global, graph) return graph_global #", "logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _disjoint_union(self, graphs): # make", "Minor() graphs = self._union_list(graphs_list) return minor.transform(graphs) except Exception as e: logger.debug('Failed iteration. Reason:", "graph) return graph_global # ------------------------------------------------------------------------------ class Union(BaseEstimator, TransformerMixin): \"\"\"Union.\"\"\" def __init__(self, attribute='position'): \"\"\"Construct.\"\"\"", "pass def transform(self, graphs_list): \"\"\"transform.\"\"\" try: for graphs in graphs_list: transformed_graph = self._disjoint_union(graphs)", "e: logger.debug('Failed iteration. 
import Minor  # NOTE: the 'from <module>' prefix of this import is truncated in this excerpt
from sklearn.base import BaseEstimator, TransformerMixin
import networkx as nx
import logging
logger = logging.getLogger(__name__)

# ------------------------------------------------------------------------------


class Flatten(BaseEstimator, TransformerMixin):
    """Flatten."""

    def __init__(self):
        """Construct."""
        pass

    def transform(self, graphs_list):
        """transform."""
        try:
            for graphs in graphs_list:
                for graph in graphs:
                    yield graph
        except Exception as e:
            logger.debug('Failed iteration. Reason: %s' % e)
            logger.debug('Exception', exc_info=True)

# ------------------------------------------------------------------------------


class DisjointUnion(BaseEstimator, TransformerMixin):
    """DisjointUnion."""

    def __init__(self):
        """Construct."""
        pass

    def transform(self, graphs_list):
        """transform."""
        try:
            for graphs in graphs_list:
                transformed_graph = self._disjoint_union(graphs)
                yield transformed_graph
        except Exception as e:
            logger.debug('Failed iteration. Reason: %s' % e)
            logger.debug('Exception', exc_info=True)

    def _disjoint_union(self, graphs):
        # make the disjoint union of all graphs
        graph_global = nx.Graph()
        for graph in graphs:
            graph_global = nx.disjoint_union(graph_global, graph)
        return graph_global

# ------------------------------------------------------------------------------


class Union(BaseEstimator, TransformerMixin):
    """Union."""

    def __init__(self, attribute='position'):
        """Construct."""
        self.attribute = attribute

    def transform(self, graphs_list):
        """transform."""
        try:
            minor = Minor()
            graphs = self._union_list(graphs_list)
            return minor.transform(graphs)
        except Exception as e:
            logger.debug('Failed iteration. Reason: %s' % e)
            logger.debug('Exception', exc_info=True)

    def _union_list(self, graphs_list):
        for graphs in graphs_list:
            transformed_graph = self._union(graphs)
            yield transformed_graph

    def _union(self, graphs):
        graph_global = nx.Graph()
        for graph in graphs:
            graph_global = nx.disjoint_union(graph_global, graph)
        # graph.node[...] is the pre-networkx-2.4 attribute API
        for n in graph_global.nodes():
            if self.attribute in graph_global.node[n]:
                graph_global.node[n]['part_id'] = \
                    [graph_global.node[n][self.attribute]]
                graph_global.node[n]['part_name'] = \
                    [graph_global.node[n]['label']]
        return graph_global
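
# --- Editor's usage sketch (not part of the original module) -------------------
# A minimal, runnable illustration of the DisjointUnion transformer above: it
# consumes an iterable of graph lists and yields one merged graph per list. The
# graphs below are made up for illustration; the Minor-based Union path is not
# exercised because Minor's import is truncated in this excerpt.
if __name__ == '__main__':
    g1 = nx.path_graph(3)   # 3 nodes
    g2 = nx.cycle_graph(4)  # 4 nodes
    merged = list(DisjointUnion().transform([[g1, g2]]))
    # disjoint_union relabels nodes, so the result has 3 + 4 = 7 nodes
    assert merged[0].number_of_nodes() == 7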
[ "= body[\"result\"] results = body[\"results\"] results_df = join_results(results) file_name = storage.write_output( session_id, str(uuid.uuid4()),", "from typing import Dict from showwhy_inference.inference import join_results from shared_code.io.storage import get_storage_client storage", "from shared_code.io.storage import get_storage_client storage = get_storage_client() def main(body: Dict): session_id = body[\"session_id\"]", "results_df = join_results(results) file_name = storage.write_output( session_id, str(uuid.uuid4()), results_df, file_type=\"partial\", extension=\"csv\" ) context[result_name]", "body[\"results\"] results_df = join_results(results) file_name = storage.write_output( session_id, str(uuid.uuid4()), results_df, file_type=\"partial\", extension=\"csv\" )", "def main(body: Dict): session_id = body[\"session_id\"] context = storage.read_context(session_id) result_name = body[\"result\"] results", "storage = get_storage_client() def main(body: Dict): session_id = body[\"session_id\"] context = storage.read_context(session_id) result_name", "main(body: Dict): session_id = body[\"session_id\"] context = storage.read_context(session_id) result_name = body[\"result\"] results =", "session_id = body[\"session_id\"] context = storage.read_context(session_id) result_name = body[\"result\"] results = body[\"results\"] results_df", "import Dict from showwhy_inference.inference import join_results from shared_code.io.storage import get_storage_client storage = get_storage_client()", "= body[\"results\"] results_df = join_results(results) file_name = storage.write_output( session_id, str(uuid.uuid4()), results_df, file_type=\"partial\", extension=\"csv\"", "storage.write_output( session_id, str(uuid.uuid4()), results_df, file_type=\"partial\", extension=\"csv\" ) context[result_name] = results_df storage.write_context(context) return {\"output\":", "session_id, str(uuid.uuid4()), results_df, file_type=\"partial\", extension=\"csv\" ) context[result_name] = results_df storage.write_context(context) return {\"output\": file_name}", "MIT license. See LICENSE file in the project. # import uuid from typing", "results = body[\"results\"] results_df = join_results(results) file_name = storage.write_output( session_id, str(uuid.uuid4()), results_df, file_type=\"partial\",", "<filename>python/showwhy-backend/showwhy_backend/InferenceJoinResultActivity/__init__.py # # Copyright (c) Microsoft. All rights reserved. # Licensed under the", "showwhy_inference.inference import join_results from shared_code.io.storage import get_storage_client storage = get_storage_client() def main(body: Dict):", "Dict): session_id = body[\"session_id\"] context = storage.read_context(session_id) result_name = body[\"result\"] results = body[\"results\"]", "= storage.read_context(session_id) result_name = body[\"result\"] results = body[\"results\"] results_df = join_results(results) file_name =", "join_results(results) file_name = storage.write_output( session_id, str(uuid.uuid4()), results_df, file_type=\"partial\", extension=\"csv\" ) context[result_name] = results_df", "Licensed under the MIT license. See LICENSE file in the project. # import", "LICENSE file in the project. # import uuid from typing import Dict from", "shared_code.io.storage import get_storage_client storage = get_storage_client() def main(body: Dict): session_id = body[\"session_id\"] context", "See LICENSE file in the project. # import uuid from typing import Dict", "# Copyright (c) Microsoft. All rights reserved. 
# Licensed under the MIT license.", "reserved. # Licensed under the MIT license. See LICENSE file in the project.", "= storage.write_output( session_id, str(uuid.uuid4()), results_df, file_type=\"partial\", extension=\"csv\" ) context[result_name] = results_df storage.write_context(context) return", "from showwhy_inference.inference import join_results from shared_code.io.storage import get_storage_client storage = get_storage_client() def main(body:", "(c) Microsoft. All rights reserved. # Licensed under the MIT license. See LICENSE", "storage.read_context(session_id) result_name = body[\"result\"] results = body[\"results\"] results_df = join_results(results) file_name = storage.write_output(", "in the project. # import uuid from typing import Dict from showwhy_inference.inference import", "file in the project. # import uuid from typing import Dict from showwhy_inference.inference", "context = storage.read_context(session_id) result_name = body[\"result\"] results = body[\"results\"] results_df = join_results(results) file_name", "result_name = body[\"result\"] results = body[\"results\"] results_df = join_results(results) file_name = storage.write_output( session_id,", "body[\"result\"] results = body[\"results\"] results_df = join_results(results) file_name = storage.write_output( session_id, str(uuid.uuid4()), results_df,", "import join_results from shared_code.io.storage import get_storage_client storage = get_storage_client() def main(body: Dict): session_id", "body[\"session_id\"] context = storage.read_context(session_id) result_name = body[\"result\"] results = body[\"results\"] results_df = join_results(results)", "All rights reserved. # Licensed under the MIT license. See LICENSE file in", "get_storage_client() def main(body: Dict): session_id = body[\"session_id\"] context = storage.read_context(session_id) result_name = body[\"result\"]", "Microsoft. All rights reserved. # Licensed under the MIT license. See LICENSE file", "the MIT license. See LICENSE file in the project. # import uuid from", "= body[\"session_id\"] context = storage.read_context(session_id) result_name = body[\"result\"] results = body[\"results\"] results_df =", "# Licensed under the MIT license. See LICENSE file in the project. #", "# import uuid from typing import Dict from showwhy_inference.inference import join_results from shared_code.io.storage", "= join_results(results) file_name = storage.write_output( session_id, str(uuid.uuid4()), results_df, file_type=\"partial\", extension=\"csv\" ) context[result_name] =", "project. # import uuid from typing import Dict from showwhy_inference.inference import join_results from", "Copyright (c) Microsoft. All rights reserved. # Licensed under the MIT license. See", "# # Copyright (c) Microsoft. All rights reserved. # Licensed under the MIT", "import uuid from typing import Dict from showwhy_inference.inference import join_results from shared_code.io.storage import", "typing import Dict from showwhy_inference.inference import join_results from shared_code.io.storage import get_storage_client storage =", "license. See LICENSE file in the project. # import uuid from typing import", "Dict from showwhy_inference.inference import join_results from shared_code.io.storage import get_storage_client storage = get_storage_client() def", "under the MIT license. See LICENSE file in the project. # import uuid", "the project. 
# import uuid from typing import Dict from showwhy_inference.inference import join_results", "uuid from typing import Dict from showwhy_inference.inference import join_results from shared_code.io.storage import get_storage_client", "file_name = storage.write_output( session_id, str(uuid.uuid4()), results_df, file_type=\"partial\", extension=\"csv\" ) context[result_name] = results_df storage.write_context(context)", "rights reserved. # Licensed under the MIT license. See LICENSE file in the", "= get_storage_client() def main(body: Dict): session_id = body[\"session_id\"] context = storage.read_context(session_id) result_name =", "join_results from shared_code.io.storage import get_storage_client storage = get_storage_client() def main(body: Dict): session_id =", "import get_storage_client storage = get_storage_client() def main(body: Dict): session_id = body[\"session_id\"] context =", "get_storage_client storage = get_storage_client() def main(body: Dict): session_id = body[\"session_id\"] context = storage.read_context(session_id)" ]
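
# --- Editor's sketch: how this activity is invoked (hypothetical values) -------
# The activity body carries the session id, the context key under which the
# joined DataFrame is stored, and the partial results to join. All literals
# below are illustrative assumptions, not values taken from the ShowWhy code:
#
# body = {
#     "session_id": "session-123",            # hypothetical session id
#     "result": "estimate_results",           # hypothetical context key
#     "results": [partial_a, partial_b],      # outputs of upstream activities
# }
# main(body)  # -> {"output": "<name of the written partial .csv>"}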
[ "AppKey=AppKey) # 获取sign r = requests.post(url=url, params=params) # 获取分析结果 result = json.loads(r.text) #", "json import time import random import hashlib from urllib import parse from collections", "i in range(10)]), 'sign': '', 'text': one_text} params['sign'] = cal_sign(params_raw=params, AppKey=AppKey) # 获取sign", "tencent import pandas as pd import numpy as np import requests import json", "= {'app_id': AppID, 'time_stamp': int(time.time()), 'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for i in range(10)]), 'sign': '',", "= {'app_id': '10000', # 'time_stamp': '1493449657', # 'nonce_str': '20e3408a79', # 'key1': '腾讯AI开放平台', #", "'sign': '', 'text': one_text} params['sign'] = cal_sign(params_raw=params, AppKey=AppKey) # 获取sign r = requests.post(url=url,", "params[i] = params_raw[i] newurl = parse.urlencode(params) newurl += ('&app_key=' + AppKey) sign =", "= tencent['api']['nlp_textpolar']['url'] results = [] # 逐句调用接口判断 count_i=0 for one_text in texts: params", "texts: 需要打标签的文档列表 :param AppID: 腾讯ai账号信息,默认调用配置文件id_1 :param AppKey: 腾讯ai账号信息,默认调用配置文件id_1 :return: 打好标签的列表,包括原始文档、标签、置信水平、是否成功 ''' url =", "'key2': '示例仅供参考', # 'sign': ''} # AppKey = '<KEY>' # cal_sign(params_raw=params_raw, # AppKey=AppKey)", ":param AppKey: 腾讯ai账号信息,默认调用配置文件id_1 :return: 打好标签的列表,包括原始文档、标签、置信水平、是否成功 ''' url = tencent['api']['nlp_textpolar']['url'] results = [] #", "50 == 0: print('tencent finish:%d' % (count_i)) return results if __name__ == '__main__':", "params_raw[i] newurl = parse.urlencode(params) newurl += ('&app_key=' + AppKey) sign = hashlib.md5(newurl.encode(\"latin1\")).hexdigest().upper() return", "# 'nonce_str': '20e3408a79', # 'key1': '腾讯AI开放平台', # 'key2': '示例仅供参考', # 'sign': ''} #", "time import random import hashlib from urllib import parse from collections import OrderedDict", "返回:BE918C28827E0783D1E5F8E6D7C37A61 params = OrderedDict() for i in sorted(params_raw): if params_raw[i] != '': params[i]", "int(time.time()), 'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for i in range(10)]), 'sign': '', 'text': one_text} params['sign'] =", "% 50 == 0: print('tencent finish:%d' % (count_i)) return results if __name__ ==", "numpy as np import requests import json import time import random import hashlib", "# 'key2': '示例仅供参考', # 'sign': ''} # AppKey = '<KEY>' # cal_sign(params_raw=params_raw, #", "'text': one_text} params['sign'] = cal_sign(params_raw=params, AppKey=AppKey) # 获取sign r = requests.post(url=url, params=params) #", "requests.post(url=url, params=params) # 获取分析结果 result = json.loads(r.text) # print(result) results.append([one_text, result['data']['polar'], result['data']['confd'], result['ret'],", "hashlib from urllib import parse from collections import OrderedDict AppID = tencent['account']['id_1']['APP_ID'] AppKey", "print('tencent finish:%d' % (count_i)) return results if __name__ == '__main__': results = creat_label(texts=['价格便宜啦,比原来优惠多了',", "# params_raw = {'app_id': '10000', # 'time_stamp': '1493449657', # 'nonce_str': '20e3408a79', # 'key1':", "sign = hashlib.md5(newurl.encode(\"latin1\")).hexdigest().upper() return sign def creat_label(texts, AppID=AppID, AppKey=AppKey): ''' :param texts: 需要打标签的文档列表", "parse.urlencode(params) newurl += ('&app_key=' + AppKey) sign = hashlib.md5(newurl.encode(\"latin1\")).hexdigest().upper() return sign def creat_label(texts,", "result['msg'] ]) r.close() count_i += 1 if count_i % 50 == 0: print('tencent", "= tencent['account']['id_1']['AppKey'] def cal_sign(params_raw,AppKey=AppKey): # 
官方文档例子为php,给出python版本 # params_raw = {'app_id': '10000', # 'time_stamp':", "AppKey = tencent['account']['id_1']['AppKey'] def cal_sign(params_raw,AppKey=AppKey): # 官方文档例子为php,给出python版本 # params_raw = {'app_id': '10000', #", "params = OrderedDict() for i in sorted(params_raw): if params_raw[i] != '': params[i] =", "as pd import numpy as np import requests import json import time import", "def cal_sign(params_raw,AppKey=AppKey): # 官方文档例子为php,给出python版本 # params_raw = {'app_id': '10000', # 'time_stamp': '1493449657', #", "'key1': '腾讯AI开放平台', # 'key2': '示例仅供参考', # 'sign': ''} # AppKey = '<KEY>' #", "import parse from collections import OrderedDict AppID = tencent['account']['id_1']['APP_ID'] AppKey = tencent['account']['id_1']['AppKey'] def", "from urllib import parse from collections import OrderedDict AppID = tencent['account']['id_1']['APP_ID'] AppKey =", "import OrderedDict AppID = tencent['account']['id_1']['APP_ID'] AppKey = tencent['account']['id_1']['AppKey'] def cal_sign(params_raw,AppKey=AppKey): # 官方文档例子为php,给出python版本 #", ":param AppID: 腾讯ai账号信息,默认调用配置文件id_1 :param AppKey: 腾讯ai账号信息,默认调用配置文件id_1 :return: 打好标签的列表,包括原始文档、标签、置信水平、是否成功 ''' url = tencent['api']['nlp_textpolar']['url'] results", "= tencent['account']['id_1']['APP_ID'] AppKey = tencent['account']['id_1']['AppKey'] def cal_sign(params_raw,AppKey=AppKey): # 官方文档例子为php,给出python版本 # params_raw = {'app_id':", "'<KEY>' # cal_sign(params_raw=params_raw, # AppKey=AppKey) # 返回:BE918C28827E0783D1E5F8E6D7C37A61 params = OrderedDict() for i in", "import pandas as pd import numpy as np import requests import json import", "def creat_label(texts, AppID=AppID, AppKey=AppKey): ''' :param texts: 需要打标签的文档列表 :param AppID: 腾讯ai账号信息,默认调用配置文件id_1 :param AppKey:", "params = {'app_id': AppID, 'time_stamp': int(time.time()), 'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for i in range(10)]), 'sign':", "= cal_sign(params_raw=params, AppKey=AppKey) # 获取sign r = requests.post(url=url, params=params) # 获取分析结果 result =", "获取sign r = requests.post(url=url, params=params) # 获取分析结果 result = json.loads(r.text) # print(result) results.append([one_text,", "import hashlib from urllib import parse from collections import OrderedDict AppID = tencent['account']['id_1']['APP_ID']", "'label', 'confidence', 'ret', 'msg']) results['label'] = np.where(results['label'] == 1, '正面', np.where(results['label'] == 0,", "腾讯ai账号信息,默认调用配置文件id_1 :return: 打好标签的列表,包括原始文档、标签、置信水平、是否成功 ''' url = tencent['api']['nlp_textpolar']['url'] results = [] # 逐句调用接口判断 count_i=0", "'10000', # 'time_stamp': '1493449657', # 'nonce_str': '20e3408a79', # 'key1': '腾讯AI开放平台', # 'key2': '示例仅供参考',", "腾讯ai账号信息,默认调用配置文件id_1 :param AppKey: 腾讯ai账号信息,默认调用配置文件id_1 :return: 打好标签的列表,包括原始文档、标签、置信水平、是否成功 ''' url = tencent['api']['nlp_textpolar']['url'] results = []", "count_i=0 for one_text in texts: params = {'app_id': AppID, 'time_stamp': int(time.time()), 'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz')", "{'app_id': AppID, 'time_stamp': int(time.time()), 'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for i in range(10)]), 'sign': '', 'text':", "逐句调用接口判断 count_i=0 for one_text in texts: params = {'app_id': AppID, 'time_stamp': int(time.time()), 'nonce_str':", "# 获取分析结果 result = json.loads(r.text) # print(result) results.append([one_text, result['data']['polar'], result['data']['confd'], result['ret'], result['msg'] ])", "random import hashlib from urllib import parse from collections import OrderedDict AppID =", 
"result['data']['confd'], result['ret'], result['msg'] ]) r.close() count_i += 1 if count_i % 50 ==", "获取分析结果 result = json.loads(r.text) # print(result) results.append([one_text, result['data']['polar'], result['data']['confd'], result['ret'], result['msg'] ]) r.close()", "params=params) # 获取分析结果 result = json.loads(r.text) # print(result) results.append([one_text, result['data']['polar'], result['data']['confd'], result['ret'], result['msg']", "from SentimentAnalysis.creat_data.config import tencent import pandas as pd import numpy as np import", "requests import json import time import random import hashlib from urllib import parse", "import numpy as np import requests import json import time import random import", "= creat_label(texts=['价格便宜啦,比原来优惠多了', '壁挂效果差,果然一分价钱一分货', '东西一般般,诶呀', '讨厌你', '一般']) results = pd.DataFrame(results, columns=['evaluation', 'label', 'confidence', 'ret',", "'20e3408a79', # 'key1': '腾讯AI开放平台', # 'key2': '示例仅供参考', # 'sign': ''} # AppKey =", "cal_sign(params_raw=params_raw, # AppKey=AppKey) # 返回:BE918C28827E0783D1E5F8E6D7C37A61 params = OrderedDict() for i in sorted(params_raw): if", "one_text} params['sign'] = cal_sign(params_raw=params, AppKey=AppKey) # 获取sign r = requests.post(url=url, params=params) # 获取分析结果", "= OrderedDict() for i in sorted(params_raw): if params_raw[i] != '': params[i] = params_raw[i]", "count_i += 1 if count_i % 50 == 0: print('tencent finish:%d' % (count_i))", "AppKey=AppKey) # 返回:BE918C28827E0783D1E5F8E6D7C37A61 params = OrderedDict() for i in sorted(params_raw): if params_raw[i] !=", "one_text in texts: params = {'app_id': AppID, 'time_stamp': int(time.time()), 'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for i", "# 返回:BE918C28827E0783D1E5F8E6D7C37A61 params = OrderedDict() for i in sorted(params_raw): if params_raw[i] != '':", "AppID = tencent['account']['id_1']['APP_ID'] AppKey = tencent['account']['id_1']['AppKey'] def cal_sign(params_raw,AppKey=AppKey): # 官方文档例子为php,给出python版本 # params_raw =", "creat_label(texts, AppID=AppID, AppKey=AppKey): ''' :param texts: 需要打标签的文档列表 :param AppID: 腾讯ai账号信息,默认调用配置文件id_1 :param AppKey: 腾讯ai账号信息,默认调用配置文件id_1", "pandas as pd import numpy as np import requests import json import time", "'nonce_str': '20e3408a79', # 'key1': '腾讯AI开放平台', # 'key2': '示例仅供参考', # 'sign': ''} # AppKey", "1 if count_i % 50 == 0: print('tencent finish:%d' % (count_i)) return results", "finish:%d' % (count_i)) return results if __name__ == '__main__': results = creat_label(texts=['价格便宜啦,比原来优惠多了', '壁挂效果差,果然一分价钱一分货',", "columns=['evaluation', 'label', 'confidence', 'ret', 'msg']) results['label'] = np.where(results['label'] == 1, '正面', np.where(results['label'] ==", "if count_i % 50 == 0: print('tencent finish:%d' % (count_i)) return results if", "np import requests import json import time import random import hashlib from urllib", "pd import numpy as np import requests import json import time import random", "in range(10)]), 'sign': '', 'text': one_text} params['sign'] = cal_sign(params_raw=params, AppKey=AppKey) # 获取sign r", "'一般']) results = pd.DataFrame(results, columns=['evaluation', 'label', 'confidence', 'ret', 'msg']) results['label'] = np.where(results['label'] ==", "tencent['account']['id_1']['AppKey'] def cal_sign(params_raw,AppKey=AppKey): # 官方文档例子为php,给出python版本 # params_raw = {'app_id': '10000', # 'time_stamp': '1493449657',", "!= '': params[i] = params_raw[i] newurl = parse.urlencode(params) newurl += ('&app_key=' + AppKey)", "# cal_sign(params_raw=params_raw, # AppKey=AppKey) # 
返回:BE918C28827E0783D1E5F8E6D7C37A61 params = OrderedDict() for i in sorted(params_raw):", "creat_label(texts=['价格便宜啦,比原来优惠多了', '壁挂效果差,果然一分价钱一分货', '东西一般般,诶呀', '讨厌你', '一般']) results = pd.DataFrame(results, columns=['evaluation', 'label', 'confidence', 'ret', 'msg'])", "in sorted(params_raw): if params_raw[i] != '': params[i] = params_raw[i] newurl = parse.urlencode(params) newurl", "AppKey) sign = hashlib.md5(newurl.encode(\"latin1\")).hexdigest().upper() return sign def creat_label(texts, AppID=AppID, AppKey=AppKey): ''' :param texts:", "'ret', 'msg']) results['label'] = np.where(results['label'] == 1, '正面', np.where(results['label'] == 0, '中性', '负面'))", "('&app_key=' + AppKey) sign = hashlib.md5(newurl.encode(\"latin1\")).hexdigest().upper() return sign def creat_label(texts, AppID=AppID, AppKey=AppKey): '''", "print(result) results.append([one_text, result['data']['polar'], result['data']['confd'], result['ret'], result['msg'] ]) r.close() count_i += 1 if count_i", "OrderedDict() for i in sorted(params_raw): if params_raw[i] != '': params[i] = params_raw[i] newurl", "''' url = tencent['api']['nlp_textpolar']['url'] results = [] # 逐句调用接口判断 count_i=0 for one_text in", "# 'time_stamp': '1493449657', # 'nonce_str': '20e3408a79', # 'key1': '腾讯AI开放平台', # 'key2': '示例仅供参考', #", "import json import time import random import hashlib from urllib import parse from", "parse from collections import OrderedDict AppID = tencent['account']['id_1']['APP_ID'] AppKey = tencent['account']['id_1']['AppKey'] def cal_sign(params_raw,AppKey=AppKey):", "if __name__ == '__main__': results = creat_label(texts=['价格便宜啦,比原来优惠多了', '壁挂效果差,果然一分价钱一分货', '东西一般般,诶呀', '讨厌你', '一般']) results =", "i in sorted(params_raw): if params_raw[i] != '': params[i] = params_raw[i] newurl = parse.urlencode(params)", "result = json.loads(r.text) # print(result) results.append([one_text, result['data']['polar'], result['data']['confd'], result['ret'], result['msg'] ]) r.close() count_i", "params_raw = {'app_id': '10000', # 'time_stamp': '1493449657', # 'nonce_str': '20e3408a79', # 'key1': '腾讯AI开放平台',", "cal_sign(params_raw=params, AppKey=AppKey) # 获取sign r = requests.post(url=url, params=params) # 获取分析结果 result = json.loads(r.text)", "== '__main__': results = creat_label(texts=['价格便宜啦,比原来优惠多了', '壁挂效果差,果然一分价钱一分货', '东西一般般,诶呀', '讨厌你', '一般']) results = pd.DataFrame(results, columns=['evaluation',", "count_i % 50 == 0: print('tencent finish:%d' % (count_i)) return results if __name__", "__name__ == '__main__': results = creat_label(texts=['价格便宜啦,比原来优惠多了', '壁挂效果差,果然一分价钱一分货', '东西一般般,诶呀', '讨厌你', '一般']) results = pd.DataFrame(results,", "import random import hashlib from urllib import parse from collections import OrderedDict AppID", "= pd.DataFrame(results, columns=['evaluation', 'label', 'confidence', 'ret', 'msg']) results['label'] = np.where(results['label'] == 1, '正面',", "'讨厌你', '一般']) results = pd.DataFrame(results, columns=['evaluation', 'label', 'confidence', 'ret', 'msg']) results['label'] = np.where(results['label']", "results = [] # 逐句调用接口判断 count_i=0 for one_text in texts: params = {'app_id':", "newurl += ('&app_key=' + AppKey) sign = hashlib.md5(newurl.encode(\"latin1\")).hexdigest().upper() return sign def creat_label(texts, AppID=AppID,", "AppKey: 腾讯ai账号信息,默认调用配置文件id_1 :return: 打好标签的列表,包括原始文档、标签、置信水平、是否成功 ''' url = tencent['api']['nlp_textpolar']['url'] results = [] # 逐句调用接口判断", "'time_stamp': '1493449657', # 'nonce_str': '20e3408a79', # 'key1': '腾讯AI开放平台', # 'key2': '示例仅供参考', # 'sign':", "'sign': ''} # AppKey = '<KEY>' # 
cal_sign(params_raw=params_raw, # AppKey=AppKey) # 返回:BE918C28827E0783D1E5F8E6D7C37A61 params", "== 0: print('tencent finish:%d' % (count_i)) return results if __name__ == '__main__': results", "return results if __name__ == '__main__': results = creat_label(texts=['价格便宜啦,比原来优惠多了', '壁挂效果差,果然一分价钱一分货', '东西一般般,诶呀', '讨厌你', '一般'])", "'壁挂效果差,果然一分价钱一分货', '东西一般般,诶呀', '讨厌你', '一般']) results = pd.DataFrame(results, columns=['evaluation', 'label', 'confidence', 'ret', 'msg']) results['label']", "'腾讯AI开放平台', # 'key2': '示例仅供参考', # 'sign': ''} # AppKey = '<KEY>' # cal_sign(params_raw=params_raw,", "= [] # 逐句调用接口判断 count_i=0 for one_text in texts: params = {'app_id': AppID,", "AppID, 'time_stamp': int(time.time()), 'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for i in range(10)]), 'sign': '', 'text': one_text}", "# 官方文档例子为php,给出python版本 # params_raw = {'app_id': '10000', # 'time_stamp': '1493449657', # 'nonce_str': '20e3408a79',", "= json.loads(r.text) # print(result) results.append([one_text, result['data']['polar'], result['data']['confd'], result['ret'], result['msg'] ]) r.close() count_i +=", "+= ('&app_key=' + AppKey) sign = hashlib.md5(newurl.encode(\"latin1\")).hexdigest().upper() return sign def creat_label(texts, AppID=AppID, AppKey=AppKey):", "for i in range(10)]), 'sign': '', 'text': one_text} params['sign'] = cal_sign(params_raw=params, AppKey=AppKey) #", "results if __name__ == '__main__': results = creat_label(texts=['价格便宜啦,比原来优惠多了', '壁挂效果差,果然一分价钱一分货', '东西一般般,诶呀', '讨厌你', '一般']) results", "AppID=AppID, AppKey=AppKey): ''' :param texts: 需要打标签的文档列表 :param AppID: 腾讯ai账号信息,默认调用配置文件id_1 :param AppKey: 腾讯ai账号信息,默认调用配置文件id_1 :return:", "官方文档例子为php,给出python版本 # params_raw = {'app_id': '10000', # 'time_stamp': '1493449657', # 'nonce_str': '20e3408a79', #", "= hashlib.md5(newurl.encode(\"latin1\")).hexdigest().upper() return sign def creat_label(texts, AppID=AppID, AppKey=AppKey): ''' :param texts: 需要打标签的文档列表 :param", "= params_raw[i] newurl = parse.urlencode(params) newurl += ('&app_key=' + AppKey) sign = hashlib.md5(newurl.encode(\"latin1\")).hexdigest().upper()", "'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for i in range(10)]), 'sign': '', 'text': one_text} params['sign'] = cal_sign(params_raw=params,", "0: print('tencent finish:%d' % (count_i)) return results if __name__ == '__main__': results =", "OrderedDict AppID = tencent['account']['id_1']['APP_ID'] AppKey = tencent['account']['id_1']['AppKey'] def cal_sign(params_raw,AppKey=AppKey): # 官方文档例子为php,给出python版本 # params_raw", "params_raw[i] != '': params[i] = params_raw[i] newurl = parse.urlencode(params) newurl += ('&app_key=' +", "[] # 逐句调用接口判断 count_i=0 for one_text in texts: params = {'app_id': AppID, 'time_stamp':", ":param texts: 需要打标签的文档列表 :param AppID: 腾讯ai账号信息,默认调用配置文件id_1 :param AppKey: 腾讯ai账号信息,默认调用配置文件id_1 :return: 打好标签的列表,包括原始文档、标签、置信水平、是否成功 ''' url", "tencent['account']['id_1']['APP_ID'] AppKey = tencent['account']['id_1']['AppKey'] def cal_sign(params_raw,AppKey=AppKey): # 官方文档例子为php,给出python版本 # params_raw = {'app_id': '10000',", "params['sign'] = cal_sign(params_raw=params, AppKey=AppKey) # 获取sign r = requests.post(url=url, params=params) # 获取分析结果 result", "cal_sign(params_raw,AppKey=AppKey): # 官方文档例子为php,给出python版本 # params_raw = {'app_id': '10000', # 'time_stamp': '1493449657', # 'nonce_str':", "'1493449657', # 'nonce_str': '20e3408a79', # 'key1': '腾讯AI开放平台', # 'key2': '示例仅供参考', # 'sign': ''}", "r.close() count_i += 1 if count_i % 50 == 0: print('tencent finish:%d' 
%", ":return: 打好标签的列表,包括原始文档、标签、置信水平、是否成功 ''' url = tencent['api']['nlp_textpolar']['url'] results = [] # 逐句调用接口判断 count_i=0 for", "result['ret'], result['msg'] ]) r.close() count_i += 1 if count_i % 50 == 0:", "for i in sorted(params_raw): if params_raw[i] != '': params[i] = params_raw[i] newurl =", "= parse.urlencode(params) newurl += ('&app_key=' + AppKey) sign = hashlib.md5(newurl.encode(\"latin1\")).hexdigest().upper() return sign def", "import time import random import hashlib from urllib import parse from collections import", "pd.DataFrame(results, columns=['evaluation', 'label', 'confidence', 'ret', 'msg']) results['label'] = np.where(results['label'] == 1, '正面', np.where(results['label']", "if params_raw[i] != '': params[i] = params_raw[i] newurl = parse.urlencode(params) newurl += ('&app_key='", "results.append([one_text, result['data']['polar'], result['data']['confd'], result['ret'], result['msg'] ]) r.close() count_i += 1 if count_i %", "+= 1 if count_i % 50 == 0: print('tencent finish:%d' % (count_i)) return", "return sign def creat_label(texts, AppID=AppID, AppKey=AppKey): ''' :param texts: 需要打标签的文档列表 :param AppID: 腾讯ai账号信息,默认调用配置文件id_1", "''} # AppKey = '<KEY>' # cal_sign(params_raw=params_raw, # AppKey=AppKey) # 返回:BE918C28827E0783D1E5F8E6D7C37A61 params =", "import tencent import pandas as pd import numpy as np import requests import", "AppID: 腾讯ai账号信息,默认调用配置文件id_1 :param AppKey: 腾讯ai账号信息,默认调用配置文件id_1 :return: 打好标签的列表,包括原始文档、标签、置信水平、是否成功 ''' url = tencent['api']['nlp_textpolar']['url'] results =", "# 'sign': ''} # AppKey = '<KEY>' # cal_sign(params_raw=params_raw, # AppKey=AppKey) # 返回:BE918C28827E0783D1E5F8E6D7C37A61", "SentimentAnalysis.creat_data.config import tencent import pandas as pd import numpy as np import requests", "result['data']['polar'], result['data']['confd'], result['ret'], result['msg'] ]) r.close() count_i += 1 if count_i % 50", "= requests.post(url=url, params=params) # 获取分析结果 result = json.loads(r.text) # print(result) results.append([one_text, result['data']['polar'], result['data']['confd'],", "AppKey=AppKey): ''' :param texts: 需要打标签的文档列表 :param AppID: 腾讯ai账号信息,默认调用配置文件id_1 :param AppKey: 腾讯ai账号信息,默认调用配置文件id_1 :return: 打好标签的列表,包括原始文档、标签、置信水平、是否成功", "for one_text in texts: params = {'app_id': AppID, 'time_stamp': int(time.time()), 'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for", "'': params[i] = params_raw[i] newurl = parse.urlencode(params) newurl += ('&app_key=' + AppKey) sign", "]) r.close() count_i += 1 if count_i % 50 == 0: print('tencent finish:%d'", "'__main__': results = creat_label(texts=['价格便宜啦,比原来优惠多了', '壁挂效果差,果然一分价钱一分货', '东西一般般,诶呀', '讨厌你', '一般']) results = pd.DataFrame(results, columns=['evaluation', 'label',", "range(10)]), 'sign': '', 'text': one_text} params['sign'] = cal_sign(params_raw=params, AppKey=AppKey) # 获取sign r =", "'', 'text': one_text} params['sign'] = cal_sign(params_raw=params, AppKey=AppKey) # 获取sign r = requests.post(url=url, params=params)", "sign def creat_label(texts, AppID=AppID, AppKey=AppKey): ''' :param texts: 需要打标签的文档列表 :param AppID: 腾讯ai账号信息,默认调用配置文件id_1 :param", "# AppKey = '<KEY>' # cal_sign(params_raw=params_raw, # AppKey=AppKey) # 返回:BE918C28827E0783D1E5F8E6D7C37A61 params = OrderedDict()", "# print(result) results.append([one_text, result['data']['polar'], result['data']['confd'], result['ret'], result['msg'] ]) r.close() count_i += 1 if", "results = creat_label(texts=['价格便宜啦,比原来优惠多了', '壁挂效果差,果然一分价钱一分货', '东西一般般,诶呀', '讨厌你', '一般']) results = pd.DataFrame(results, 
columns=['evaluation', 'label', 'confidence',", "# 逐句调用接口判断 count_i=0 for one_text in texts: params = {'app_id': AppID, 'time_stamp': int(time.time()),", "texts: params = {'app_id': AppID, 'time_stamp': int(time.time()), 'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for i in range(10)]),", "''' :param texts: 需要打标签的文档列表 :param AppID: 腾讯ai账号信息,默认调用配置文件id_1 :param AppKey: 腾讯ai账号信息,默认调用配置文件id_1 :return: 打好标签的列表,包括原始文档、标签、置信水平、是否成功 '''", "in texts: params = {'app_id': AppID, 'time_stamp': int(time.time()), 'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for i in", "'东西一般般,诶呀', '讨厌你', '一般']) results = pd.DataFrame(results, columns=['evaluation', 'label', 'confidence', 'ret', 'msg']) results['label'] =", "# 'key1': '腾讯AI开放平台', # 'key2': '示例仅供参考', # 'sign': ''} # AppKey = '<KEY>'", "hashlib.md5(newurl.encode(\"latin1\")).hexdigest().upper() return sign def creat_label(texts, AppID=AppID, AppKey=AppKey): ''' :param texts: 需要打标签的文档列表 :param AppID:", "json.loads(r.text) # print(result) results.append([one_text, result['data']['polar'], result['data']['confd'], result['ret'], result['msg'] ]) r.close() count_i += 1", "results = pd.DataFrame(results, columns=['evaluation', 'label', 'confidence', 'ret', 'msg']) results['label'] = np.where(results['label'] == 1,", "collections import OrderedDict AppID = tencent['account']['id_1']['APP_ID'] AppKey = tencent['account']['id_1']['AppKey'] def cal_sign(params_raw,AppKey=AppKey): # 官方文档例子为php,给出python版本", "'time_stamp': int(time.time()), 'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for i in range(10)]), 'sign': '', 'text': one_text} params['sign']", "AppKey = '<KEY>' # cal_sign(params_raw=params_raw, # AppKey=AppKey) # 返回:BE918C28827E0783D1E5F8E6D7C37A61 params = OrderedDict() for", "tencent['api']['nlp_textpolar']['url'] results = [] # 逐句调用接口判断 count_i=0 for one_text in texts: params =", "% (count_i)) return results if __name__ == '__main__': results = creat_label(texts=['价格便宜啦,比原来优惠多了', '壁挂效果差,果然一分价钱一分货', '东西一般般,诶呀',", "= '<KEY>' # cal_sign(params_raw=params_raw, # AppKey=AppKey) # 返回:BE918C28827E0783D1E5F8E6D7C37A61 params = OrderedDict() for i", "'confidence', 'ret', 'msg']) results['label'] = np.where(results['label'] == 1, '正面', np.where(results['label'] == 0, '中性',", "sorted(params_raw): if params_raw[i] != '': params[i] = params_raw[i] newurl = parse.urlencode(params) newurl +=", "r = requests.post(url=url, params=params) # 获取分析结果 result = json.loads(r.text) # print(result) results.append([one_text, result['data']['polar'],", "'示例仅供参考', # 'sign': ''} # AppKey = '<KEY>' # cal_sign(params_raw=params_raw, # AppKey=AppKey) #", "newurl = parse.urlencode(params) newurl += ('&app_key=' + AppKey) sign = hashlib.md5(newurl.encode(\"latin1\")).hexdigest().upper() return sign", "+ AppKey) sign = hashlib.md5(newurl.encode(\"latin1\")).hexdigest().upper() return sign def creat_label(texts, AppID=AppID, AppKey=AppKey): ''' :param", "import requests import json import time import random import hashlib from urllib import", "需要打标签的文档列表 :param AppID: 腾讯ai账号信息,默认调用配置文件id_1 :param AppKey: 腾讯ai账号信息,默认调用配置文件id_1 :return: 打好标签的列表,包括原始文档、标签、置信水平、是否成功 ''' url = tencent['api']['nlp_textpolar']['url']", "(count_i)) return results if __name__ == '__main__': results = creat_label(texts=['价格便宜啦,比原来优惠多了', '壁挂效果差,果然一分价钱一分货', '东西一般般,诶呀', '讨厌你',", "打好标签的列表,包括原始文档、标签、置信水平、是否成功 ''' url = tencent['api']['nlp_textpolar']['url'] results = [] # 逐句调用接口判断 count_i=0 for one_text", "url = 
tencent['api']['nlp_textpolar']['url'] results = [] # 逐句调用接口判断 count_i=0 for one_text in texts:", "# AppKey=AppKey) # 返回:BE918C28827E0783D1E5F8E6D7C37A61 params = OrderedDict() for i in sorted(params_raw): if params_raw[i]", "# 获取sign r = requests.post(url=url, params=params) # 获取分析结果 result = json.loads(r.text) # print(result)", "urllib import parse from collections import OrderedDict AppID = tencent['account']['id_1']['APP_ID'] AppKey = tencent['account']['id_1']['AppKey']", "as np import requests import json import time import random import hashlib from", "{'app_id': '10000', # 'time_stamp': '1493449657', # 'nonce_str': '20e3408a79', # 'key1': '腾讯AI开放平台', # 'key2':", "from collections import OrderedDict AppID = tencent['account']['id_1']['APP_ID'] AppKey = tencent['account']['id_1']['AppKey'] def cal_sign(params_raw,AppKey=AppKey): #", "''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for i in range(10)]), 'sign': '', 'text': one_text} params['sign'] = cal_sign(params_raw=params, AppKey=AppKey)", "'msg']) results['label'] = np.where(results['label'] == 1, '正面', np.where(results['label'] == 0, '中性', '负面')) print(results)" ]
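
# --- Editor's sketch (not part of the original script) -------------------------
# Offline walk-through of the signing scheme implemented by cal_sign above:
# sort the parameters by name, drop empty values, URL-encode the rest, append
# '&app_key=<AppKey>', then take the uppercase hex MD5 digest. The key below is
# a made-up placeholder, so the digest it prints is illustrative only.
#
# demo_params = {'app_id': '10000',
#                'time_stamp': '1493449657',
#                'nonce_str': '20e3408a79',
#                'text': 'hello',
#                'sign': ''}
# print(cal_sign(params_raw=demo_params, AppKey='dummy_app_key'))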
[ "not used 'data_format': 'channels_last', # 'channels_last' 'channels_first' 'batch_size': batch_size, 'coord_scale': 1, 'noobj_scale': 1,", "[373.,326.]]] } image_augmentor_config = { 'data_format': 'channels_last', 'output_shape': [448, 448], # 'zoom_size': [520,", "# 'rotate': [0.5, -10., 10.], 'pad_truth_to': 60, } data = os.listdir('./voc2007/') data =", "as yolov3 # import matplotlib.pyplot as plt # import matplotlib.patches as patches #", "matplotlib.pyplot as plt # import matplotlib.patches as patches # from skimage import io,", "= testnet.train_one_epoch(lr) print('>> mean loss', mean_loss) testnet.save_weight('latest', './weight/test') # 'latest', 'best' # img", "testnet.save_weight('latest', './weight/test') # 'latest', 'best' # img = io.imread() # img = transform.resize(img,", "'val_generator': None # not used } testnet = yolov3.YOLOv3(config, trainset_provider) testnet.load_weight('./weight/test-40449') for i", "'train_generator': train_gen, 'val_generator': None # not used } testnet = yolov3.YOLOv3(config, trainset_provider) testnet.load_weight('./weight/test-40449')", "90.], [156., 198.], [373.,326.]]] } image_augmentor_config = { 'data_format': 'channels_last', 'output_shape': [448, 448],", "[[[10., 13.], [16, 30.], [33., 23.]], [[30., 61.], [62., 45.], [59., 119.]], [[116.,", "'batch_size': batch_size, 'coord_scale': 1, 'noobj_scale': 1, 'obj_scale': 5., 'class_scale': 1., 'num_priors': 3, 'nms_score_threshold':", "'nms_max_boxes': 10, 'nms_iou_threshold': 0.5, 'priors': [[[10., 13.], [16, 30.], [33., 23.]], [[30., 61.],", "'epoch', i, '-'*25) if i in reduce_lr_epoch: lr = lr/10. print('reduce lr, lr=',", "numpy as np import os import utils.tfrecord_voc_utils as voc_utils import YOLOv3 as yolov3", "mean_loss) testnet.save_weight('latest', './weight/test') # 'latest', 'best' # img = io.imread() # img =", "# rect = patches.Rectangle((bbox[i][1],bbox[i][0]), bbox[i][3]-bbox[i][1],bbox[i][2]-bbox[i][0],linewidth=2,edgecolor='b',facecolor='none') # axis.add_patch(rect) # plt.text(bbox[i][1],bbox[i][0], id_to_clasname[class_id[i]]+str(' ')+str(scores[i]), color='red', fontsize=12)", "[448, 448, 3], 'num_train': 5011, 'num_val': 0, # not used 'train_generator': train_gen, 'val_generator':", "= voc_utils.get_generator(data, batch_size, buffer_size, image_augmentor_config) trainset_provider = { 'data_shape': [448, 448, 3], 'num_train':", "lr=', lr, 'now') mean_loss = testnet.train_one_epoch(lr) print('>> mean loss', mean_loss) testnet.save_weight('latest', './weight/test') #", "= { 'data_format': 'channels_last', 'output_shape': [448, 448], # 'zoom_size': [520, 520], # 'crop_method':", "np import os import utils.tfrecord_voc_utils as voc_utils import YOLOv3 as yolov3 # import", "1., 'num_priors': 3, 'nms_score_threshold': 0.5, 'nms_max_boxes': 10, 'nms_iou_threshold': 0.5, 'priors': [[[10., 13.], [16,", "voc_utils.get_generator(data, batch_size, buffer_size, image_augmentor_config) trainset_provider = { 'data_shape': [448, 448, 3], 'num_train': 5011,", "batch_size, buffer_size, image_augmentor_config) trainset_provider = { 'data_shape': [448, 448, 3], 'num_train': 5011, 'num_val':", "os import utils.tfrecord_voc_utils as voc_utils import YOLOv3 as yolov3 # import matplotlib.pyplot as", "io, transform from utils.voc_classname_encoder import classname_to_ids os.environ['CUDA_VISIBLE_DEVICES'] = '0' lr = 0.001 batch_size", "'nms_iou_threshold': 0.5, 'priors': [[[10., 13.], [16, 30.], [33., 23.]], [[30., 61.], [62., 45.],", "[0.5, -10., 10.], 'pad_truth_to': 60, } data = 
os.listdir('./voc2007/') data = [os.path.join('./voc2007/', name)", "voc_utils import YOLOv3 as yolov3 # import matplotlib.pyplot as plt # import matplotlib.patches", "# 'channels_last' 'channels_first' 'batch_size': batch_size, 'coord_scale': 1, 'noobj_scale': 1, 'obj_scale': 5., 'class_scale': 1.,", "mean_loss = testnet.train_one_epoch(lr) print('>> mean loss', mean_loss) testnet.save_weight('latest', './weight/test') # 'latest', 'best' #", "result[1] # class_id = result[2] # print(scores, bbox, class_id) # plt.figure(1) # plt.imshow(np.squeeze(img))", "'keep_prob': 0.5, # not used 'data_format': 'channels_last', # 'channels_last' 'channels_first' 'batch_size': batch_size, 'coord_scale':", "# img = transform.resize(img, [448,448]) # img = np.expand_dims(img, 0) # result =", "# plt.imshow(np.squeeze(img)) # axis = plt.gca() # for i in range(len(scores)): # rect", "name) for name in data] train_gen = voc_utils.get_generator(data, batch_size, buffer_size, image_augmentor_config) trainset_provider =", "scores = result[0] # bbox = result[1] # class_id = result[2] # print(scores,", "import matplotlib.patches as patches # from skimage import io, transform from utils.voc_classname_encoder import", "'flip_prob': [0., 0.5], 'fill_mode': 'BILINEAR', 'keep_aspect_ratios': False, 'constant_values': 0., # 'color_jitter_prob': 0.5, #", "as tf import numpy as np import os import utils.tfrecord_voc_utils as voc_utils import", "256 epochs = 160 reduce_lr_epoch = [] config = { 'mode': 'train', #", "name in data] train_gen = voc_utils.get_generator(data, batch_size, buffer_size, image_augmentor_config) trainset_provider = { 'data_shape':", "skimage import io, transform from utils.voc_classname_encoder import classname_to_ids os.environ['CUDA_VISIBLE_DEVICES'] = '0' lr =", "[] config = { 'mode': 'train', # 'train', 'test' 'data_shape': [448, 448, 3],", "in range(epochs): print('-'*25, 'epoch', i, '-'*25) if i in reduce_lr_epoch: lr = lr/10.", "import tensorflow as tf import numpy as np import os import utils.tfrecord_voc_utils as", "classname_to_ids os.environ['CUDA_VISIBLE_DEVICES'] = '0' lr = 0.001 batch_size = 12 buffer_size = 256", "tf import numpy as np import os import utils.tfrecord_voc_utils as voc_utils import YOLOv3", "# 'crop_method': 'random', 'flip_prob': [0., 0.5], 'fill_mode': 'BILINEAR', 'keep_aspect_ratios': False, 'constant_values': 0., #", "'latest', 'best' # img = io.imread() # img = transform.resize(img, [448,448]) # img", "5e-4, 'keep_prob': 0.5, # not used 'data_format': 'channels_last', # 'channels_last' 'channels_first' 'batch_size': batch_size,", "in reduce_lr_epoch: lr = lr/10. 
print('reduce lr, lr=', lr, 'now') mean_loss = testnet.train_one_epoch(lr)", "config = { 'mode': 'train', # 'train', 'test' 'data_shape': [448, 448, 3], 'num_classes':", "# not used 'data_format': 'channels_last', # 'channels_last' 'channels_first' 'batch_size': batch_size, 'coord_scale': 1, 'noobj_scale':", "range(len(scores)): # rect = patches.Rectangle((bbox[i][1],bbox[i][0]), bbox[i][3]-bbox[i][1],bbox[i][2]-bbox[i][0],linewidth=2,edgecolor='b',facecolor='none') # axis.add_patch(rect) # plt.text(bbox[i][1],bbox[i][0], id_to_clasname[class_id[i]]+str(' ')+str(scores[i]), color='red',", "5011, 'num_val': 0, # not used 'train_generator': train_gen, 'val_generator': None # not used", "5., 'class_scale': 1., 'num_priors': 3, 'nms_score_threshold': 0.5, 'nms_max_boxes': 10, 'nms_iou_threshold': 0.5, 'priors': [[[10.,", "'num_priors': 3, 'nms_score_threshold': 0.5, 'nms_max_boxes': 10, 'nms_iou_threshold': 0.5, 'priors': [[[10., 13.], [16, 30.],", "rect = patches.Rectangle((bbox[i][1],bbox[i][0]), bbox[i][3]-bbox[i][1],bbox[i][2]-bbox[i][0],linewidth=2,edgecolor='b',facecolor='none') # axis.add_patch(rect) # plt.text(bbox[i][1],bbox[i][0], id_to_clasname[class_id[i]]+str(' ')+str(scores[i]), color='red', fontsize=12) #", "= { 'data_shape': [448, 448, 3], 'num_train': 5011, 'num_val': 0, # not used", "= { 'mode': 'train', # 'train', 'test' 'data_shape': [448, 448, 3], 'num_classes': 20,", "image_augmentor_config = { 'data_format': 'channels_last', 'output_shape': [448, 448], # 'zoom_size': [520, 520], #", "3], 'num_classes': 20, 'weight_decay': 5e-4, 'keep_prob': 0.5, # not used 'data_format': 'channels_last', #", "'nms_score_threshold': 0.5, 'nms_max_boxes': 10, 'nms_iou_threshold': 0.5, 'priors': [[[10., 13.], [16, 30.], [33., 23.]],", "0.5, 'nms_max_boxes': 10, 'nms_iou_threshold': 0.5, 'priors': [[[10., 13.], [16, 30.], [33., 23.]], [[30.,", "print('reduce lr, lr=', lr, 'now') mean_loss = testnet.train_one_epoch(lr) print('>> mean loss', mean_loss) testnet.save_weight('latest',", "[[116., 90.], [156., 198.], [373.,326.]]] } image_augmentor_config = { 'data_format': 'channels_last', 'output_shape': [448,", "class_id = result[2] # print(scores, bbox, class_id) # plt.figure(1) # plt.imshow(np.squeeze(img)) # axis", "# img = np.expand_dims(img, 0) # result = testnet.test_one_image(img) # id_to_clasname = {k:v", "'noobj_scale': 1, 'obj_scale': 5., 'class_scale': 1., 'num_priors': 3, 'nms_score_threshold': 0.5, 'nms_max_boxes': 10, 'nms_iou_threshold':", "# plt.figure(1) # plt.imshow(np.squeeze(img)) # axis = plt.gca() # for i in range(len(scores)):", "1, 'obj_scale': 5., 'class_scale': 1., 'num_priors': 3, 'nms_score_threshold': 0.5, 'nms_max_boxes': 10, 'nms_iou_threshold': 0.5,", "i, '-'*25) if i in reduce_lr_epoch: lr = lr/10. 
print('reduce lr, lr=', lr,", "'crop_method': 'random', 'flip_prob': [0., 0.5], 'fill_mode': 'BILINEAR', 'keep_aspect_ratios': False, 'constant_values': 0., # 'color_jitter_prob':", "for i in range(len(scores)): # rect = patches.Rectangle((bbox[i][1],bbox[i][0]), bbox[i][3]-bbox[i][1],bbox[i][2]-bbox[i][0],linewidth=2,edgecolor='b',facecolor='none') # axis.add_patch(rect) # plt.text(bbox[i][1],bbox[i][0],", "<filename>testYOLOv3.py import tensorflow as tf import numpy as np import os import utils.tfrecord_voc_utils", "i in range(len(scores)): # rect = patches.Rectangle((bbox[i][1],bbox[i][0]), bbox[i][3]-bbox[i][1],bbox[i][2]-bbox[i][0],linewidth=2,edgecolor='b',facecolor='none') # axis.add_patch(rect) # plt.text(bbox[i][1],bbox[i][0], id_to_clasname[class_id[i]]+str('", "= patches.Rectangle((bbox[i][1],bbox[i][0]), bbox[i][3]-bbox[i][1],bbox[i][2]-bbox[i][0],linewidth=2,edgecolor='b',facecolor='none') # axis.add_patch(rect) # plt.text(bbox[i][1],bbox[i][0], id_to_clasname[class_id[i]]+str(' ')+str(scores[i]), color='red', fontsize=12) # plt.show()", "import matplotlib.pyplot as plt # import matplotlib.patches as patches # from skimage import", "{ 'mode': 'train', # 'train', 'test' 'data_shape': [448, 448, 3], 'num_classes': 20, 'weight_decay':", "23.]], [[30., 61.], [62., 45.], [59., 119.]], [[116., 90.], [156., 198.], [373.,326.]]] }", "i in reduce_lr_epoch: lr = lr/10. print('reduce lr, lr=', lr, 'now') mean_loss =", "'keep_aspect_ratios': False, 'constant_values': 0., # 'color_jitter_prob': 0.5, # 'rotate': [0.5, -10., 10.], 'pad_truth_to':", "{ 'data_format': 'channels_last', 'output_shape': [448, 448], # 'zoom_size': [520, 520], # 'crop_method': 'random',", "[os.path.join('./voc2007/', name) for name in data] train_gen = voc_utils.get_generator(data, batch_size, buffer_size, image_augmentor_config) trainset_provider", "'rotate': [0.5, -10., 10.], 'pad_truth_to': 60, } data = os.listdir('./voc2007/') data = [os.path.join('./voc2007/',", "{k:v for (v,k) in classname_to_ids.items()} # scores = result[0] # bbox = result[1]", "'./weight/test') # 'latest', 'best' # img = io.imread() # img = transform.resize(img, [448,448])", "'channels_last', # 'channels_last' 'channels_first' 'batch_size': batch_size, 'coord_scale': 1, 'noobj_scale': 1, 'obj_scale': 5., 'class_scale':", "reduce_lr_epoch = [] config = { 'mode': 'train', # 'train', 'test' 'data_shape': [448,", "tensorflow as tf import numpy as np import os import utils.tfrecord_voc_utils as voc_utils", "# 'zoom_size': [520, 520], # 'crop_method': 'random', 'flip_prob': [0., 0.5], 'fill_mode': 'BILINEAR', 'keep_aspect_ratios':", "[0., 0.5], 'fill_mode': 'BILINEAR', 'keep_aspect_ratios': False, 'constant_values': 0., # 'color_jitter_prob': 0.5, # 'rotate':", "= plt.gca() # for i in range(len(scores)): # rect = patches.Rectangle((bbox[i][1],bbox[i][0]), bbox[i][3]-bbox[i][1],bbox[i][2]-bbox[i][0],linewidth=2,edgecolor='b',facecolor='none') #", "[448,448]) # img = np.expand_dims(img, 0) # result = testnet.test_one_image(img) # id_to_clasname =", "'test' 'data_shape': [448, 448, 3], 'num_classes': 20, 'weight_decay': 5e-4, 'keep_prob': 0.5, # not", "result[0] # bbox = result[1] # class_id = result[2] # print(scores, bbox, class_id)", "'output_shape': [448, 448], # 'zoom_size': [520, 520], # 'crop_method': 'random', 'flip_prob': [0., 0.5],", "3, 'nms_score_threshold': 0.5, 'nms_max_boxes': 10, 'nms_iou_threshold': 0.5, 'priors': [[[10., 13.], [16, 30.], [33.,", "# 'latest', 'best' # img = io.imread() # img = transform.resize(img, [448,448]) #", 
"# class_id = result[2] # print(scores, bbox, class_id) # plt.figure(1) # plt.imshow(np.squeeze(img)) #", "'-'*25) if i in reduce_lr_epoch: lr = lr/10. print('reduce lr, lr=', lr, 'now')", "testnet.test_one_image(img) # id_to_clasname = {k:v for (v,k) in classname_to_ids.items()} # scores = result[0]", "used 'train_generator': train_gen, 'val_generator': None # not used } testnet = yolov3.YOLOv3(config, trainset_provider)", "lr, 'now') mean_loss = testnet.train_one_epoch(lr) print('>> mean loss', mean_loss) testnet.save_weight('latest', './weight/test') # 'latest',", "'BILINEAR', 'keep_aspect_ratios': False, 'constant_values': 0., # 'color_jitter_prob': 0.5, # 'rotate': [0.5, -10., 10.],", "= os.listdir('./voc2007/') data = [os.path.join('./voc2007/', name) for name in data] train_gen = voc_utils.get_generator(data,", "'priors': [[[10., 13.], [16, 30.], [33., 23.]], [[30., 61.], [62., 45.], [59., 119.]],", "in range(len(scores)): # rect = patches.Rectangle((bbox[i][1],bbox[i][0]), bbox[i][3]-bbox[i][1],bbox[i][2]-bbox[i][0],linewidth=2,edgecolor='b',facecolor='none') # axis.add_patch(rect) # plt.text(bbox[i][1],bbox[i][0], id_to_clasname[class_id[i]]+str(' ')+str(scores[i]),", "= np.expand_dims(img, 0) # result = testnet.test_one_image(img) # id_to_clasname = {k:v for (v,k)", "data = [os.path.join('./voc2007/', name) for name in data] train_gen = voc_utils.get_generator(data, batch_size, buffer_size,", "= 12 buffer_size = 256 epochs = 160 reduce_lr_epoch = [] config =", "os.environ['CUDA_VISIBLE_DEVICES'] = '0' lr = 0.001 batch_size = 12 buffer_size = 256 epochs", "'data_format': 'channels_last', 'output_shape': [448, 448], # 'zoom_size': [520, 520], # 'crop_method': 'random', 'flip_prob':", "yolov3 # import matplotlib.pyplot as plt # import matplotlib.patches as patches # from", "(v,k) in classname_to_ids.items()} # scores = result[0] # bbox = result[1] # class_id", "'channels_last', 'output_shape': [448, 448], # 'zoom_size': [520, 520], # 'crop_method': 'random', 'flip_prob': [0.,", "# for i in range(len(scores)): # rect = patches.Rectangle((bbox[i][1],bbox[i][0]), bbox[i][3]-bbox[i][1],bbox[i][2]-bbox[i][0],linewidth=2,edgecolor='b',facecolor='none') # axis.add_patch(rect) #", "for name in data] train_gen = voc_utils.get_generator(data, batch_size, buffer_size, image_augmentor_config) trainset_provider = {", "520], # 'crop_method': 'random', 'flip_prob': [0., 0.5], 'fill_mode': 'BILINEAR', 'keep_aspect_ratios': False, 'constant_values': 0.,", "img = transform.resize(img, [448,448]) # img = np.expand_dims(img, 0) # result = testnet.test_one_image(img)", "= 256 epochs = 160 reduce_lr_epoch = [] config = { 'mode': 'train',", "transform.resize(img, [448,448]) # img = np.expand_dims(img, 0) # result = testnet.test_one_image(img) # id_to_clasname", "from utils.voc_classname_encoder import classname_to_ids os.environ['CUDA_VISIBLE_DEVICES'] = '0' lr = 0.001 batch_size = 12", "'data_format': 'channels_last', # 'channels_last' 'channels_first' 'batch_size': batch_size, 'coord_scale': 1, 'noobj_scale': 1, 'obj_scale': 5.,", "i in range(epochs): print('-'*25, 'epoch', i, '-'*25) if i in reduce_lr_epoch: lr =", "= [] config = { 'mode': 'train', # 'train', 'test' 'data_shape': [448, 448,", "'weight_decay': 5e-4, 'keep_prob': 0.5, # not used 'data_format': 'channels_last', # 'channels_last' 'channels_first' 'batch_size':", "plt.gca() # for i in range(len(scores)): # rect = patches.Rectangle((bbox[i][1],bbox[i][0]), 
bbox[i][3]-bbox[i][1],bbox[i][2]-bbox[i][0],linewidth=2,edgecolor='b',facecolor='none') # axis.add_patch(rect)", "'data_shape': [448, 448, 3], 'num_classes': 20, 'weight_decay': 5e-4, 'keep_prob': 0.5, # not used", "13.], [16, 30.], [33., 23.]], [[30., 61.], [62., 45.], [59., 119.]], [[116., 90.],", "0.001 batch_size = 12 buffer_size = 256 epochs = 160 reduce_lr_epoch = []", "119.]], [[116., 90.], [156., 198.], [373.,326.]]] } image_augmentor_config = { 'data_format': 'channels_last', 'output_shape':", "198.], [373.,326.]]] } image_augmentor_config = { 'data_format': 'channels_last', 'output_shape': [448, 448], # 'zoom_size':", "0.5, 'priors': [[[10., 13.], [16, 30.], [33., 23.]], [[30., 61.], [62., 45.], [59.,", "patches # from skimage import io, transform from utils.voc_classname_encoder import classname_to_ids os.environ['CUDA_VISIBLE_DEVICES'] =", "import utils.tfrecord_voc_utils as voc_utils import YOLOv3 as yolov3 # import matplotlib.pyplot as plt", "trainset_provider) testnet.load_weight('./weight/test-40449') for i in range(epochs): print('-'*25, 'epoch', i, '-'*25) if i in", "= testnet.test_one_image(img) # id_to_clasname = {k:v for (v,k) in classname_to_ids.items()} # scores =", "epochs = 160 reduce_lr_epoch = [] config = { 'mode': 'train', # 'train',", "'zoom_size': [520, 520], # 'crop_method': 'random', 'flip_prob': [0., 0.5], 'fill_mode': 'BILINEAR', 'keep_aspect_ratios': False,", "plt.imshow(np.squeeze(img)) # axis = plt.gca() # for i in range(len(scores)): # rect =", "# axis = plt.gca() # for i in range(len(scores)): # rect = patches.Rectangle((bbox[i][1],bbox[i][0]),", "buffer_size = 256 epochs = 160 reduce_lr_epoch = [] config = { 'mode':", "= result[1] # class_id = result[2] # print(scores, bbox, class_id) # plt.figure(1) #", "-10., 10.], 'pad_truth_to': 60, } data = os.listdir('./voc2007/') data = [os.path.join('./voc2007/', name) for", "utils.tfrecord_voc_utils as voc_utils import YOLOv3 as yolov3 # import matplotlib.pyplot as plt #", "trainset_provider = { 'data_shape': [448, 448, 3], 'num_train': 5011, 'num_val': 0, # not", "import classname_to_ids os.environ['CUDA_VISIBLE_DEVICES'] = '0' lr = 0.001 batch_size = 12 buffer_size =", "in classname_to_ids.items()} # scores = result[0] # bbox = result[1] # class_id =", "import os import utils.tfrecord_voc_utils as voc_utils import YOLOv3 as yolov3 # import matplotlib.pyplot", "# 'train', 'test' 'data_shape': [448, 448, 3], 'num_classes': 20, 'weight_decay': 5e-4, 'keep_prob': 0.5,", "'constant_values': 0., # 'color_jitter_prob': 0.5, # 'rotate': [0.5, -10., 10.], 'pad_truth_to': 60, }", "data] train_gen = voc_utils.get_generator(data, batch_size, buffer_size, image_augmentor_config) trainset_provider = { 'data_shape': [448, 448,", "'channels_last' 'channels_first' 'batch_size': batch_size, 'coord_scale': 1, 'noobj_scale': 1, 'obj_scale': 5., 'class_scale': 1., 'num_priors':", "160 reduce_lr_epoch = [] config = { 'mode': 'train', # 'train', 'test' 'data_shape':", "# not used 'train_generator': train_gen, 'val_generator': None # not used } testnet =", "in data] train_gen = voc_utils.get_generator(data, batch_size, buffer_size, image_augmentor_config) trainset_provider = { 'data_shape': [448,", "'mode': 'train', # 'train', 'test' 'data_shape': [448, 448, 3], 'num_classes': 20, 'weight_decay': 5e-4,", "range(epochs): print('-'*25, 'epoch', i, '-'*25) if i in reduce_lr_epoch: lr = lr/10. 
print('reduce", "'now') mean_loss = testnet.train_one_epoch(lr) print('>> mean loss', mean_loss) testnet.save_weight('latest', './weight/test') # 'latest', 'best'", "[[30., 61.], [62., 45.], [59., 119.]], [[116., 90.], [156., 198.], [373.,326.]]] } image_augmentor_config", "12 buffer_size = 256 epochs = 160 reduce_lr_epoch = [] config = {", "loss', mean_loss) testnet.save_weight('latest', './weight/test') # 'latest', 'best' # img = io.imread() # img", "testnet = yolov3.YOLOv3(config, trainset_provider) testnet.load_weight('./weight/test-40449') for i in range(epochs): print('-'*25, 'epoch', i, '-'*25)", "for i in range(epochs): print('-'*25, 'epoch', i, '-'*25) if i in reduce_lr_epoch: lr", "id_to_clasname = {k:v for (v,k) in classname_to_ids.items()} # scores = result[0] # bbox", "axis = plt.gca() # for i in range(len(scores)): # rect = patches.Rectangle((bbox[i][1],bbox[i][0]), bbox[i][3]-bbox[i][1],bbox[i][2]-bbox[i][0],linewidth=2,edgecolor='b',facecolor='none')", "[156., 198.], [373.,326.]]] } image_augmentor_config = { 'data_format': 'channels_last', 'output_shape': [448, 448], #", "[16, 30.], [33., 23.]], [[30., 61.], [62., 45.], [59., 119.]], [[116., 90.], [156.,", "data = os.listdir('./voc2007/') data = [os.path.join('./voc2007/', name) for name in data] train_gen =", "train_gen = voc_utils.get_generator(data, batch_size, buffer_size, image_augmentor_config) trainset_provider = { 'data_shape': [448, 448, 3],", "'num_val': 0, # not used 'train_generator': train_gen, 'val_generator': None # not used }", "0.5, # 'rotate': [0.5, -10., 10.], 'pad_truth_to': 60, } data = os.listdir('./voc2007/') data", "import YOLOv3 as yolov3 # import matplotlib.pyplot as plt # import matplotlib.patches as", "= 160 reduce_lr_epoch = [] config = { 'mode': 'train', # 'train', 'test'", "61.], [62., 45.], [59., 119.]], [[116., 90.], [156., 198.], [373.,326.]]] } image_augmentor_config =", "lr/10. print('reduce lr, lr=', lr, 'now') mean_loss = testnet.train_one_epoch(lr) print('>> mean loss', mean_loss)", "result[2] # print(scores, bbox, class_id) # plt.figure(1) # plt.imshow(np.squeeze(img)) # axis = plt.gca()", "0) # result = testnet.test_one_image(img) # id_to_clasname = {k:v for (v,k) in classname_to_ids.items()}", "lr = lr/10. print('reduce lr, lr=', lr, 'now') mean_loss = testnet.train_one_epoch(lr) print('>> mean", "img = io.imread() # img = transform.resize(img, [448,448]) # img = np.expand_dims(img, 0)", "0., # 'color_jitter_prob': 0.5, # 'rotate': [0.5, -10., 10.], 'pad_truth_to': 60, } data", "3], 'num_train': 5011, 'num_val': 0, # not used 'train_generator': train_gen, 'val_generator': None #", "= result[2] # print(scores, bbox, class_id) # plt.figure(1) # plt.imshow(np.squeeze(img)) # axis =", "classname_to_ids.items()} # scores = result[0] # bbox = result[1] # class_id = result[2]", "'num_train': 5011, 'num_val': 0, # not used 'train_generator': train_gen, 'val_generator': None # not", "45.], [59., 119.]], [[116., 90.], [156., 198.], [373.,326.]]] } image_augmentor_config = { 'data_format':", "[448, 448], # 'zoom_size': [520, 520], # 'crop_method': 'random', 'flip_prob': [0., 0.5], 'fill_mode':", "= lr/10. 
print('reduce lr, lr=', lr, 'now') mean_loss = testnet.train_one_epoch(lr) print('>> mean loss',", "10, 'nms_iou_threshold': 0.5, 'priors': [[[10., 13.], [16, 30.], [33., 23.]], [[30., 61.], [62.,", "plt # import matplotlib.patches as patches # from skimage import io, transform from", "image_augmentor_config) trainset_provider = { 'data_shape': [448, 448, 3], 'num_train': 5011, 'num_val': 0, #", "# import matplotlib.patches as patches # from skimage import io, transform from utils.voc_classname_encoder", "testnet.load_weight('./weight/test-40449') for i in range(epochs): print('-'*25, 'epoch', i, '-'*25) if i in reduce_lr_epoch:", "batch_size = 12 buffer_size = 256 epochs = 160 reduce_lr_epoch = [] config", "bbox = result[1] # class_id = result[2] # print(scores, bbox, class_id) # plt.figure(1)", "plt.figure(1) # plt.imshow(np.squeeze(img)) # axis = plt.gca() # for i in range(len(scores)): #", "as voc_utils import YOLOv3 as yolov3 # import matplotlib.pyplot as plt # import", "lr, lr=', lr, 'now') mean_loss = testnet.train_one_epoch(lr) print('>> mean loss', mean_loss) testnet.save_weight('latest', './weight/test')", "reduce_lr_epoch: lr = lr/10. print('reduce lr, lr=', lr, 'now') mean_loss = testnet.train_one_epoch(lr) print('>>", "[62., 45.], [59., 119.]], [[116., 90.], [156., 198.], [373.,326.]]] } image_augmentor_config = {", "print('>> mean loss', mean_loss) testnet.save_weight('latest', './weight/test') # 'latest', 'best' # img = io.imread()", "False, 'constant_values': 0., # 'color_jitter_prob': 0.5, # 'rotate': [0.5, -10., 10.], 'pad_truth_to': 60,", "train_gen, 'val_generator': None # not used } testnet = yolov3.YOLOv3(config, trainset_provider) testnet.load_weight('./weight/test-40449') for", "'fill_mode': 'BILINEAR', 'keep_aspect_ratios': False, 'constant_values': 0., # 'color_jitter_prob': 0.5, # 'rotate': [0.5, -10.,", "io.imread() # img = transform.resize(img, [448,448]) # img = np.expand_dims(img, 0) # result", "class_id) # plt.figure(1) # plt.imshow(np.squeeze(img)) # axis = plt.gca() # for i in", "'0' lr = 0.001 batch_size = 12 buffer_size = 256 epochs = 160", "# 'color_jitter_prob': 0.5, # 'rotate': [0.5, -10., 10.], 'pad_truth_to': 60, } data =", "used } testnet = yolov3.YOLOv3(config, trainset_provider) testnet.load_weight('./weight/test-40449') for i in range(epochs): print('-'*25, 'epoch',", "batch_size, 'coord_scale': 1, 'noobj_scale': 1, 'obj_scale': 5., 'class_scale': 1., 'num_priors': 3, 'nms_score_threshold': 0.5,", "= io.imread() # img = transform.resize(img, [448,448]) # img = np.expand_dims(img, 0) #", "= 0.001 batch_size = 12 buffer_size = 256 epochs = 160 reduce_lr_epoch =", "print(scores, bbox, class_id) # plt.figure(1) # plt.imshow(np.squeeze(img)) # axis = plt.gca() # for", "'pad_truth_to': 60, } data = os.listdir('./voc2007/') data = [os.path.join('./voc2007/', name) for name in", "as patches # from skimage import io, transform from utils.voc_classname_encoder import classname_to_ids os.environ['CUDA_VISIBLE_DEVICES']", "buffer_size, image_augmentor_config) trainset_provider = { 'data_shape': [448, 448, 3], 'num_train': 5011, 'num_val': 0,", "= [os.path.join('./voc2007/', name) for name in data] train_gen = voc_utils.get_generator(data, batch_size, buffer_size, image_augmentor_config)", "'channels_first' 'batch_size': batch_size, 'coord_scale': 1, 'noobj_scale': 1, 'obj_scale': 5., 'class_scale': 1., 'num_priors': 3,", "448, 3], 'num_train': 5011, 'num_val': 0, # not used 'train_generator': train_gen, 'val_generator': None", "testnet.train_one_epoch(lr) 
print('>> mean loss', mean_loss) testnet.save_weight('latest', './weight/test') # 'latest', 'best' # img =", "1, 'noobj_scale': 1, 'obj_scale': 5., 'class_scale': 1., 'num_priors': 3, 'nms_score_threshold': 0.5, 'nms_max_boxes': 10,", "YOLOv3 as yolov3 # import matplotlib.pyplot as plt # import matplotlib.patches as patches", "448, 3], 'num_classes': 20, 'weight_decay': 5e-4, 'keep_prob': 0.5, # not used 'data_format': 'channels_last',", "from skimage import io, transform from utils.voc_classname_encoder import classname_to_ids os.environ['CUDA_VISIBLE_DEVICES'] = '0' lr", "'class_scale': 1., 'num_priors': 3, 'nms_score_threshold': 0.5, 'nms_max_boxes': 10, 'nms_iou_threshold': 0.5, 'priors': [[[10., 13.],", "0, # not used 'train_generator': train_gen, 'val_generator': None # not used } testnet", "not used 'train_generator': train_gen, 'val_generator': None # not used } testnet = yolov3.YOLOv3(config,", "np.expand_dims(img, 0) # result = testnet.test_one_image(img) # id_to_clasname = {k:v for (v,k) in", "} image_augmentor_config = { 'data_format': 'channels_last', 'output_shape': [448, 448], # 'zoom_size': [520, 520],", "# scores = result[0] # bbox = result[1] # class_id = result[2] #", "# import matplotlib.pyplot as plt # import matplotlib.patches as patches # from skimage", "} testnet = yolov3.YOLOv3(config, trainset_provider) testnet.load_weight('./weight/test-40449') for i in range(epochs): print('-'*25, 'epoch', i,", "mean loss', mean_loss) testnet.save_weight('latest', './weight/test') # 'latest', 'best' # img = io.imread() #", "bbox, class_id) # plt.figure(1) # plt.imshow(np.squeeze(img)) # axis = plt.gca() # for i", "matplotlib.patches as patches # from skimage import io, transform from utils.voc_classname_encoder import classname_to_ids", "utils.voc_classname_encoder import classname_to_ids os.environ['CUDA_VISIBLE_DEVICES'] = '0' lr = 0.001 batch_size = 12 buffer_size", "'data_shape': [448, 448, 3], 'num_train': 5011, 'num_val': 0, # not used 'train_generator': train_gen,", "import numpy as np import os import utils.tfrecord_voc_utils as voc_utils import YOLOv3 as", "'color_jitter_prob': 0.5, # 'rotate': [0.5, -10., 10.], 'pad_truth_to': 60, } data = os.listdir('./voc2007/')", "} data = os.listdir('./voc2007/') data = [os.path.join('./voc2007/', name) for name in data] train_gen", "# not used } testnet = yolov3.YOLOv3(config, trainset_provider) testnet.load_weight('./weight/test-40449') for i in range(epochs):", "None # not used } testnet = yolov3.YOLOv3(config, trainset_provider) testnet.load_weight('./weight/test-40449') for i in", "# bbox = result[1] # class_id = result[2] # print(scores, bbox, class_id) #", "for (v,k) in classname_to_ids.items()} # scores = result[0] # bbox = result[1] #", "if i in reduce_lr_epoch: lr = lr/10. 
print('reduce lr, lr=', lr, 'now') mean_loss", "= '0' lr = 0.001 batch_size = 12 buffer_size = 256 epochs =", "0.5], 'fill_mode': 'BILINEAR', 'keep_aspect_ratios': False, 'constant_values': 0., # 'color_jitter_prob': 0.5, # 'rotate': [0.5,", "os.listdir('./voc2007/') data = [os.path.join('./voc2007/', name) for name in data] train_gen = voc_utils.get_generator(data, batch_size,", "[59., 119.]], [[116., 90.], [156., 198.], [373.,326.]]] } image_augmentor_config = { 'data_format': 'channels_last',", "= {k:v for (v,k) in classname_to_ids.items()} # scores = result[0] # bbox =", "transform from utils.voc_classname_encoder import classname_to_ids os.environ['CUDA_VISIBLE_DEVICES'] = '0' lr = 0.001 batch_size =", "448], # 'zoom_size': [520, 520], # 'crop_method': 'random', 'flip_prob': [0., 0.5], 'fill_mode': 'BILINEAR',", "print('-'*25, 'epoch', i, '-'*25) if i in reduce_lr_epoch: lr = lr/10. print('reduce lr,", "'random', 'flip_prob': [0., 0.5], 'fill_mode': 'BILINEAR', 'keep_aspect_ratios': False, 'constant_values': 0., # 'color_jitter_prob': 0.5,", "20, 'weight_decay': 5e-4, 'keep_prob': 0.5, # not used 'data_format': 'channels_last', # 'channels_last' 'channels_first'", "60, } data = os.listdir('./voc2007/') data = [os.path.join('./voc2007/', name) for name in data]", "lr = 0.001 batch_size = 12 buffer_size = 256 epochs = 160 reduce_lr_epoch", "= result[0] # bbox = result[1] # class_id = result[2] # print(scores, bbox,", "as plt # import matplotlib.patches as patches # from skimage import io, transform", "10.], 'pad_truth_to': 60, } data = os.listdir('./voc2007/') data = [os.path.join('./voc2007/', name) for name", "# print(scores, bbox, class_id) # plt.figure(1) # plt.imshow(np.squeeze(img)) # axis = plt.gca() #", "'best' # img = io.imread() # img = transform.resize(img, [448,448]) # img =", "[33., 23.]], [[30., 61.], [62., 45.], [59., 119.]], [[116., 90.], [156., 198.], [373.,326.]]]", "as np import os import utils.tfrecord_voc_utils as voc_utils import YOLOv3 as yolov3 #", "'train', # 'train', 'test' 'data_shape': [448, 448, 3], 'num_classes': 20, 'weight_decay': 5e-4, 'keep_prob':", "30.], [33., 23.]], [[30., 61.], [62., 45.], [59., 119.]], [[116., 90.], [156., 198.],", "img = np.expand_dims(img, 0) # result = testnet.test_one_image(img) # id_to_clasname = {k:v for", "= transform.resize(img, [448,448]) # img = np.expand_dims(img, 0) # result = testnet.test_one_image(img) #", "[448, 448, 3], 'num_classes': 20, 'weight_decay': 5e-4, 'keep_prob': 0.5, # not used 'data_format':", "{ 'data_shape': [448, 448, 3], 'num_train': 5011, 'num_val': 0, # not used 'train_generator':", "= yolov3.YOLOv3(config, trainset_provider) testnet.load_weight('./weight/test-40449') for i in range(epochs): print('-'*25, 'epoch', i, '-'*25) if", "# img = io.imread() # img = transform.resize(img, [448,448]) # img = np.expand_dims(img,", "# result = testnet.test_one_image(img) # id_to_clasname = {k:v for (v,k) in classname_to_ids.items()} #", "result = testnet.test_one_image(img) # id_to_clasname = {k:v for (v,k) in classname_to_ids.items()} # scores", "used 'data_format': 'channels_last', # 'channels_last' 'channels_first' 'batch_size': batch_size, 'coord_scale': 1, 'noobj_scale': 1, 'obj_scale':", "[520, 520], # 'crop_method': 'random', 'flip_prob': [0., 0.5], 'fill_mode': 'BILINEAR', 'keep_aspect_ratios': False, 'constant_values':", "# from skimage import io, transform from utils.voc_classname_encoder import classname_to_ids os.environ['CUDA_VISIBLE_DEVICES'] = '0'", "# id_to_clasname = {k:v for (v,k) in 
classname_to_ids.items()} # scores = result[0] #", "'num_classes': 20, 'weight_decay': 5e-4, 'keep_prob': 0.5, # not used 'data_format': 'channels_last', # 'channels_last'", "yolov3.YOLOv3(config, trainset_provider) testnet.load_weight('./weight/test-40449') for i in range(epochs): print('-'*25, 'epoch', i, '-'*25) if i", "0.5, # not used 'data_format': 'channels_last', # 'channels_last' 'channels_first' 'batch_size': batch_size, 'coord_scale': 1,", "import io, transform from utils.voc_classname_encoder import classname_to_ids os.environ['CUDA_VISIBLE_DEVICES'] = '0' lr = 0.001", "'train', 'test' 'data_shape': [448, 448, 3], 'num_classes': 20, 'weight_decay': 5e-4, 'keep_prob': 0.5, #", "'coord_scale': 1, 'noobj_scale': 1, 'obj_scale': 5., 'class_scale': 1., 'num_priors': 3, 'nms_score_threshold': 0.5, 'nms_max_boxes':", "not used } testnet = yolov3.YOLOv3(config, trainset_provider) testnet.load_weight('./weight/test-40449') for i in range(epochs): print('-'*25,", "'obj_scale': 5., 'class_scale': 1., 'num_priors': 3, 'nms_score_threshold': 0.5, 'nms_max_boxes': 10, 'nms_iou_threshold': 0.5, 'priors':" ]
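# ---------------------------------------------------------------------------
# A runnable sketch of the commented-out inference path above. It only
# restates that block under stated assumptions: test_one_image() takes a
# [1, 448, 448, 3] array and returns (scores, bbox, class_id), with boxes in
# [y1, x1, y2, x2] order as the Rectangle construction implies. The function
# name and the example image path are hypothetical, not part of the original.
def detect_and_plot(image_path):
    from skimage import io, transform
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches
    img = transform.resize(io.imread(image_path), [448, 448])
    result = testnet.test_one_image(np.expand_dims(img, 0))
    scores, bbox, class_id = result[0], result[1], result[2]
    id_to_classname = {v: k for (k, v) in classname_to_ids.items()}
    plt.figure(1)
    plt.imshow(np.squeeze(img))
    axis = plt.gca()
    for i in range(len(scores)):
        # draw one box per detection; bbox rows assumed [y1, x1, y2, x2]
        rect = patches.Rectangle((bbox[i][1], bbox[i][0]),
                                 bbox[i][3] - bbox[i][1],
                                 bbox[i][2] - bbox[i][0],
                                 linewidth=2, edgecolor='b', facecolor='none')
        axis.add_patch(rect)
        axis.text(bbox[i][1], bbox[i][0],
                  '%s %.2f' % (id_to_classname[class_id[i]], scores[i]),
                  color='b')
    plt.show()

# detect_and_plot('dog.jpg')  # hypothetical example image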
# -*- coding: utf-8 -*-
'''
#-------------------------------------------------------------------------------
# NATIONAL UNIVERSITY OF SINGAPORE - NUS
# SINGAPORE INSTITUTE FOR NEUROTECHNOLOGY - SINAPSE
# Singapore
# URL: http://www.sinapseinstitute.org
#-------------------------------------------------------------------------------
# Neuromorphic Engineering Group
# Author: <NAME>, PhD
# Contact:
#-------------------------------------------------------------------------------
# Description: defines classes for processing tactile data to be used for
# braille recognition.
# The 'Braille' class stores the SVM model used to recognize braille characters.
# This class abstracts the process of data processing, meaning that it only deals
# with the data ready for training and/or classification procedures.
# For handling data, the class 'BrailleHandler' should be used instead.
#-------------------------------------------------------------------------------
'''
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#LIBRARIES
import os, os.path, sys
sys.path.append('../general')
import numpy as np
import scipy as sp
from sklearn.svm import SVC
from sklearn.externals import joblib
from dataprocessing import * #import the detect_peaks method
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#Feature extraction for SVM-based braille classification
class BrailleHandler():
    #---------------------------------------------------------------------------
    #read a file and return the data
    def loadFile(filepath):
        if os.path.isfile(filepath):
            #return the data contained in the file
            return np.loadtxt(filepath)
        else:
            return False #file not found

    def convert2vector(data):
        return np.transpose(data)

    #convert the data from a file into a vector
    def oldconvert2vector(data,nrows,ncols):
        #first convert to 3D matrix
        datamat = BrailleHandler.oldconvert2frames(data,nrows,ncols)
        numsamples = np.size(datamat,2) #number of samples or frames
        dataVector = np.zeros((nrows*ncols,numsamples))
        taxelCounter = 0
        for i in range(nrows):
            for j in range(ncols):
                dataVector[taxelCounter] = datamat[i,j,:]
                taxelCounter+=1
        return dataVector #return the dataVector

    #convert data from the file that is arranged
    #in a 2D array (every line contains readings from all rows for one column)
    #into a 3D array (row,col,frame)
    def oldconvert2frames(data,nrows,ncols):
        datamat = np.zeros((nrows,ncols,np.int(np.floor(np.divide(np.size(data,0),nrows)))),dtype=int)
        c = 0
        for ii in range(0,(np.size(data,0)-nrows),nrows):
            datamat[:,:,c] = data[ii:ii+nrows,:]
            c = c+1
        return datamat #return the 3D matrix

    #---------------------------------------------------------------------------
    #find the number of peaks in every single taxel
    def countPeaks(inputMatrix,threshold):
        if len(inputMatrix.shape) == 3: #3D matrix
            nrows = inputMatrix.shape[0] #number of rows
            ncols = inputMatrix.shape[1] #number of columns
            nsamples = inputMatrix.shape[2] #number of samples
            #feature vector containing the number of peaks for
            #each taxel of the tactile sensor
            featureVector = np.zeros(nrows*ncols)
            #matrix M*NxT where each row corresponds to a taxel and the
            #columns to the time series signal
            tactileSignal = np.zeros((nrows*ncols,nsamples))
            #counter for the index of the tactileSignal matrix
            counter = 0
            #loop through the rows
            for k in range(nrows):
                #loop through the columns
                for w in range(ncols):
                    #get a single taxel signal
                    tactileSignal[counter] = inputMatrix[k,w,:]
                    #count the number of peaks in the signal
                    #and build the feature vector
                    #find the peaks
                    tmppeaks = detect_peaks(tactileSignal[counter],mph=threshold,mpd=20,show=False)
                    #number of peaks is the length of 'tmppeaks'
                    featureVector[counter] = len(tmppeaks)
                    #increment the counter
                    counter+=1
        #list of lists, every element of the list corresponds to
        #the time series of a single taxel
        else:
            #find the total number of taxels in the tactile array
            numberTaxels = len(inputMatrix)
            #feature vector containing the number of peaks for
            #each taxel of the tactile sensor
            featureVector = np.zeros(numberTaxels)
            #scan all the taxels
            for k in range(numberTaxels):
                #find the peaks
                tmppeaks = detect_peaks(inputMatrix[k],mph=threshold,mpd=20,show=False)
                #number of peaks is the length of 'tmppeaks'
                featureVector[k] = len(tmppeaks)
        #return the feature vector
        return featureVector

    #-------------------------------------------------------------------------------
    #create the training data based on the list of the text files to be loaded
    #and the labels corresponding to each text data
    def createTrainingData(dataFiles,nrows,ncols,filt=False):
        for k in range(len(dataFiles)):
            #get the filename
            filename = dataFiles[k]
            #load the data
            datafile = BrailleHandler.loadFile(filename)
            #convert to vector
            #datavector = BrailleHandler.oldconvert2vector(datafile,nrows,ncols)
            datavector = BrailleHandler.convert2vector(datafile)
            #if data should be filtered
            if filt == True:
                #for every taxel
                for i in range(np.size(datavector,0)):
                    mva = MovingAverage() #window size = 10, sampfreq = 100 Hz
                    #for every sample, get the moving average response
                    for z in range(np.size(datavector,1)):
                        datavector[i,z] = mva.getSample(datavector[i,z])
            #find the number of peaks
            peakTh = 0.05 #threshold for peak detection
            #create the feature vector
            featurevector = BrailleHandler.countPeaks(datavector,peakTh)
            #if it is the first iteration, create the training data
            if k != 0:
                trainingData = np.vstack((trainingData,featurevector))
            else:
                trainingData = featurevector
        return trainingData
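#-------------------------------------------------------------------------------
# The filtering and peak counting above rely on MovingAverage and detect_peaks,
# both imported from the 'dataprocessing' module, which is not shown here. The
# stand-ins below are only a minimal sketch of the interfaces this file assumes
# (names suffixed with 'Sketch' are hypothetical); the real implementations in
# 'dataprocessing' may well differ.
class MovingAverageSketch:
    '''Boxcar filter consumed one sample at a time, matching getSample() usage.'''
    def __init__(self, windowsize=10):
        self.windowsize = windowsize
        self.buffer = []
    def getSample(self, x):
        #push the new sample, drop the oldest, return the current window mean
        self.buffer.append(x)
        if len(self.buffer) > self.windowsize:
            self.buffer.pop(0)
        return sum(self.buffer) / len(self.buffer)

def detect_peaks_sketch(x, mph=None, mpd=1, show=False):
    '''Indices of local maxima at least mph high and mpd samples apart.'''
    peaks = []
    for i in range(1, len(x) - 1):
        #a peak rises above its left neighbor and does not fall below its right
        if x[i - 1] < x[i] >= x[i + 1] and (mph is None or x[i] >= mph):
            #enforce the minimum peak distance greedily
            if not peaks or i - peaks[-1] >= mpd:
                peaks.append(i)
    return peaks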
#-------------------------------------------------------------------------------
#Braille Recognition Class
class Braille():
    def __init__(self):
        #labels for every class
        #dictionary to associate label names and values
        self.classes = dict()
        #SVM model
        self.modelSVM = None
    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------
    #load a pre-trained SVM model from a file
    def load(self,filepath):
        #checks if the file exists
        if os.path.isfile(filepath):
            self.modelSVM = joblib.load(filepath) #loads the SVM model
            return True #load ok
        else:
            return False #file not found
    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------
    #save a new SVM model
    def save(self,filename):
        #saving
        joblib.dump(self.modelSVM,filename+'.pkl')
    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------
    #train a SVM model
    def train(self,trainingData,labels):
        #create a new SVM model
        self.modelSVM = SVC()
        #pass the training data and the labels for training
        self.modelSVM.fit(trainingData,labels)
    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------
    #classification
    #features should be a feature vector following the same pattern
    #that was used for training
    def classify(self,features):
        #check if there is a SVM model to classify the data
        if self.modelSVM is not None:
            #classify based on the input features
            svmResp = self.modelSVM.predict(features)
            #return the classifier's response
            return svmResp
        else:
            return False
    #---------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
if __name__=='__main__':
    #---------------------------------------------------------------------------
    import numpy as np #numpy
    import matplotlib.pyplot as plt #matplotlib
    NROWS = 4 #number of rows in the tactile array
    NCOLS = 4 #number of columns in the tactile array
    peakTh = 300 #threshold for detecting peaks
    #load the braille data from file
    #2D matrix
    datafile = np.loadtxt('NewData_BRC/BRC_B1.txt')
    #convert data to a 3D matrix
    tactileData = BrailleHandler.oldconvert2frames(datafile,NROWS,NCOLS)
    #feature vector containing the number of peaks for each taxel
    features = BrailleHandler.countPeaks(tactileData,peakTh)
    #---------------------------------------------------------------------------
    #feature extraction with 2D array
    #moving average of the 2D matrix
    #create a moving average object
    #default parameters, windowsize = 10, sampfreq = 100 Hz
    mva = MovingAverage()
    tactileVector = BrailleHandler.oldconvert2vector(datafile,NROWS,NCOLS)
    numsamples = np.size(tactileData,2) #total number of samples
    tactileMVA = np.zeros((NROWS*NCOLS,numsamples))
    counter = 0 #taxel counter
    for k in range(NROWS*NCOLS):
        #scan all the columns
        for z in range(numsamples):
            #filtering the signal sample by sample
            tactileMVA[counter,z] = mva.getSample(tactileVector[k,z])
        counter+=1 #increment the taxel counter
    #with the filtered data, count peaks again
    filtFeatures = BrailleHandler.countPeaks(tactileMVA,peakTh)
    #print the filtered feature vector
    print(filtFeatures)
number of peaks", "model used to recognize braille characters. # this class abstracts the process of", "-*- coding: utf-8 -*- ''' #------------------------------------------------------------------------------- # NATIONAL UNIVERSITY OF SINGAPORE - NUS", "signal #and built the feature vector #find the peaks tmppeaks = detect_peaks(tactileSignal[counter],mph=threshold,mpd=20,show=False) #number", "train(self,trainingData,labels): #create a new SVM model self.modelSVM = SVC() #pass the training data", "featureVector[counter] = len(tmppeaks) #increment the counter counter+=1 #list of list, every element of", "be a feature vector following the same pattern #that was used for training", "line contains reading from all rows for one column) #into a 3D array", "sklearn.externals import joblib from dataprocessing import * #import the detect_peaks method #------------------------------------------------------------------------------- #-------------------------------------------------------------------------------", "datamat[:,:,c] = data[ii:ii+nrows,:] c = c+1 return datamat #return the 3D matrix #---------------------------------------------------------------------------", "False #file not found def convert2vector(data): return np.transpose(data) #convert the data from a", "in every single taxel def countPeaks(inputMatrix,threshold): if len(inputMatrix.shape) == 3: #3D matrix nrows", "(row,col,frame) def oldconvert2frames(data,nrows,ncols): datamat = np.zeros((nrows,ncols,np.int(np.floor(np.divide(np.size(data,0),nrows)))),dtype=int) c = 0 for ii in range(0,(np.size(data,0)-nrows),nrows):", "# URL: http://www.sinapseinstitute.org #------------------------------------------------------------------------------- # Neuromorphic Engineering Group # Author: <NAME>, PhD #", "found def convert2vector(data): return np.transpose(data) #convert the data from a file into a", "the data contained in the data return np.loadtxt(filepath) else: return False #file not", "the data ready for training and/or classification procedures. # For handling data, the", "reading from all rows for one column) #into a 3D array (row,col,frame) def", "the feature vector return featureVector #------------------------------------------------------------------------------- #create the training data based on the", "vector def oldconvert2vector(data,nrows,ncols): #first convert to 3D matrix datamat = BrailleHandler.oldconvert2frames(data,nrows,ncols) numsamples =", "for k in range(numberTaxels): #find the peaks tmppeaks = detect_peaks(inputMatrix[k],mph=threshold,mpd=20,show=False) #number of peaks", "def train(self,trainingData,labels): #create a new SVM model self.modelSVM = SVC() #pass the training", "#increment the taxel counter #with the filtered data, count peaks again filtFeatures =", "vector #find the peaks tmppeaks = detect_peaks(tactileSignal[counter],mph=threshold,mpd=20,show=False) #number of peaks is the length", "braille recognition. 
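#NOTE: the two helpers imported from 'dataprocessing' are assumed (from their
#use in this file, not from an authoritative definition) to behave as follows:
#   detect_peaks(x, mph=None, mpd=1, show=False) -> array of peak indices
#   MovingAverage().getSample(sample) -> filtered sample (stateful filter,
#   default window size = 10, sampling frequency = 100 Hz)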
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#Feature extraction for SVM-based braille classification
class BrailleHandler():
    #---------------------------------------------------------------------------
    #read a file and return the data
    @staticmethod
    def loadFile(filepath):
        if os.path.isfile(filepath):
            #return the data contained in the file
            return np.loadtxt(filepath)
        else:
            return False #file not found

    #convert file data into a vector (one row per taxel)
    @staticmethod
    def convert2vector(data):
        return np.transpose(data)

    #convert the data from a file into a vector
    @staticmethod
    def oldconvert2vector(data,nrows,ncols):
        #first convert to a 3D matrix
        datamat = BrailleHandler.oldconvert2frames(data,nrows,ncols)
        numsamples = np.size(datamat,2) #number of samples or frames
        dataVector = np.zeros((nrows*ncols,numsamples))
        taxelCounter = 0
        for i in range(nrows):
            for j in range(ncols):
                dataVector[taxelCounter] = datamat[i,j,:]
                taxelCounter+=1
        return dataVector #return the dataVector

    #convert data from a file that is arranged as a 2D array (every line
    #contains readings from all rows for one column) into a 3D array
    #(row,col,frame)
    @staticmethod
    def oldconvert2frames(data,nrows,ncols):
        #np.int was removed from NumPy; the builtin int is the correct cast here
        datamat = np.zeros((nrows,ncols,int(np.floor(np.divide(np.size(data,0),nrows)))),dtype=int)
        c = 0
        for ii in range(0,(np.size(data,0)-nrows),nrows):
            datamat[:,:,c] = data[ii:ii+nrows,:]
            c = c+1
        return datamat #return the 3D matrix
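    #Worked example (illustrative values): for a 400-line x 4-column text file
    #and nrows=ncols=4, oldconvert2frames allocates a (4,4,100) array, but the
    #loop bound range(0, 400-4, 4) only writes frames 0..98, so the final frame
    #is left as zeros; oldconvert2vector then returns a (16,100) matrix with
    #one row per taxel. This mirrors the original loop bounds rather than
    #changing them.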
    #---------------------------------------------------------------------------
    #find the number of peaks in every single taxel
    @staticmethod
    def countPeaks(inputMatrix,threshold):
        #accept lists of lists as well as numpy arrays
        inputMatrix = np.asarray(inputMatrix)
        if len(inputMatrix.shape) == 3: #3D matrix
            nrows = inputMatrix.shape[0] #number of rows
            ncols = inputMatrix.shape[1] #number of columns
            nsamples = inputMatrix.shape[2] #number of samples
            #feature vector containing the number of peaks for
            #each taxel of the tactile sensor
            featureVector = np.zeros(nrows*ncols)
            #matrix M*NxT where each row corresponds to a taxel and the
            #columns to the time series signal
            tactileSignal = np.zeros((nrows*ncols,nsamples))
            #counter for the index of the tactileSignal matrix
            counter = 0
            #loop through the rows
            for k in range(nrows):
                #loop through the columns
                for w in range(ncols):
                    #get a single taxel signal
                    tactileSignal[counter] = inputMatrix[k,w,:]
                    #count the number of peaks in the signal
                    #and build the feature vector
                    #find the peaks
                    tmppeaks = detect_peaks(tactileSignal[counter],mph=threshold,mpd=20,show=False)
                    #number of peaks is the length of 'tmppeaks'
                    featureVector[counter] = len(tmppeaks)
                    #increment the counter
                    counter+=1
        else:
            #2D matrix: every row corresponds to
            #the time series of a single taxel
            #find the total number of taxels in the tactile array
            numberTaxels = len(inputMatrix)
            #feature vector containing the number of peaks for
            #each taxel of the tactile sensor
            featureVector = np.zeros(numberTaxels)
            #scan all the taxels
            for k in range(numberTaxels):
                #find the peaks
                tmppeaks = detect_peaks(inputMatrix[k],mph=threshold,mpd=20,show=False)
                #number of peaks is the length of 'tmppeaks'
                featureVector[k] = len(tmppeaks)
        #return the feature vector
        return featureVector

    #---------------------------------------------------------------------------
    #create the training data based on the list of text files to be loaded
    #and the labels corresponding to each text file
    @staticmethod
    def createTrainingData(dataFiles,nrows,ncols,filt=False):
        for k in range(len(dataFiles)):
            #get the filename
            filename = dataFiles[k]
            #load the data
            datafile = BrailleHandler.loadFile(filename)
            #convert to vector
            #datavector = BrailleHandler.oldconvert2vector(datafile,nrows,ncols)
            datavector = BrailleHandler.convert2vector(datafile)
            #if the data should be filtered
            if filt:
                #for every taxel
                for i in range(np.size(datavector,0)):
                    #new filter per taxel: window size = 10, sampfreq = 100 Hz
                    mva = MovingAverage()
                    #for every sample, get the moving average response
                    for z in range(np.size(datavector,1)):
                        datavector[i,z] = mva.getSample(datavector[i,z])
            #find the number of peaks
            peakTh = 0.05 #threshold for peak detection
            #create the feature vector
            featurevector = BrailleHandler.countPeaks(datavector,peakTh)
            #stack the feature vectors; on the first iteration,
            #create the training data matrix
            if k != 0:
                trainingData = np.vstack((trainingData,featurevector))
            else:
                trainingData = featurevector
        return trainingData
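    #Usage sketch (hypothetical file names, shown for illustration only):
    #   dataFiles = ['NewData_BRC/BRC_A1.txt', 'NewData_BRC/BRC_B1.txt']
    #   labels = [0, 1] #one label per file
    #   trainingData = BrailleHandler.createTrainingData(dataFiles, 4, 4, filt=True)
    #   #trainingData has shape (len(dataFiles), ntaxels) and can be passed,
    #   #together with labels, to Braille().train()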
#-------------------------------------------------------------------------------
#Braille Recognition Class
class Braille():
    def __init__(self):
        #labels for every class:
        #dictionary to associate label names and values
        self.classes = dict()
        #SVM model
        self.modelSVM = None
    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------
    #load a pre-trained SVM model from a file
    def load(self,filepath):
        #check if the file exists
        if os.path.isfile(filepath):
            self.modelSVM = joblib.load(filepath) #load the SVM model
            return True #load ok
        else:
            return False #file not found
    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------
    #save a new SVM model
    def save(self,filename):
        #saving
        joblib.dump(self.modelSVM,filename+'.pkl')
    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------
    #train an SVM model
    def train(self,trainingData,labels):
        #create a new SVM model
        self.modelSVM = SVC()
        #pass the training data and the labels for training
        self.modelSVM.fit(trainingData,labels)
    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------
    #classification
    #'features' should be a feature vector following the same pattern
    #that was used for training
    def classify(self,features):
        #check if there is an SVM model to classify the data
        if self.modelSVM is not None:
            #classify based on the input features; predict expects a 2D array,
            #so a single feature vector is promoted to shape (1, nfeatures)
            svmResp = self.modelSVM.predict(np.atleast_2d(features))
            #return the output of the classifier
            return svmResp
        else:
            return False
    #---------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
if __name__=='__main__':
    #---------------------------------------------------------------------------
    import numpy as np #numpy
    import matplotlib.pyplot as plt #matplotlib
    NROWS = 4 #number of rows in the tactile array
    NCOLS = 4 #number of columns in the tactile array
    peakTh = 300 #threshold for detecting peaks
    #load the braille data from file: a 2D matrix
    datafile = np.loadtxt('NewData_BRC/BRC_B1.txt')
    #convert the data to a 3D matrix
    tactileData = BrailleHandler.oldconvert2frames(datafile,NROWS,NCOLS)
    #feature vector containing the number of peaks for each taxel
    features = BrailleHandler.countPeaks(tactileData,peakTh)
    #---------------------------------------------------------------------------
    #feature extraction with a 2D array:
    #moving average filtering of the 2D matrix
    tactileVector = BrailleHandler.oldconvert2vector(datafile,NROWS,NCOLS)
    numsamples = np.size(tactileData,2) #total number of samples
    tactileMVA = np.zeros((NROWS*NCOLS,numsamples))
    counter = 0 #taxel counter
    for k in range(NROWS*NCOLS):
        #create a moving average object for each taxel so that filter state
        #does not leak across taxels (matching createTrainingData above);
        #default parameters: window size = 10, sampfreq = 100 Hz
        mva = MovingAverage()
        #scan all the samples
        for z in range(numsamples):
            #filtering the signal sample by sample
            tactileMVA[counter,z] = mva.getSample(tactileVector[k,z])
        counter+=1 #increment the taxel counter
    #with the filtered data, count peaks again
    filtFeatures = BrailleHandler.countPeaks(tactileMVA,peakTh)
    #print the filtered feature vector
    print(filtFeatures)
"output of the classifier return svmResp else: return False #--------------------------------------------------------------------------- #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- if", "it only deals # with the data ready for training and/or classification procedures.", "every single taxel def countPeaks(inputMatrix,threshold): if len(inputMatrix.shape) == 3: #3D matrix nrows =", "#Feature extraction for SVM-based braille classification class BrailleHandler(): #--------------------------------------------------------------------------- #read a file and", "classification class BrailleHandler(): #--------------------------------------------------------------------------- #read a file and return the data def loadFile(filepath):", "the 2D matrix #create a moving average object #default parameters, windowsize = 10,", "PhD # Contact: #------------------------------------------------------------------------------- # Description: defines classes for processing tactile data to", "a file and return the data def loadFile(filepath): if os.path.isfile(filepath): #return the data", "taxel and the #columns to the time series signal tactileSignal = np.zeros((nrows*ncols,nsamples)) #counter", "= len(inputMatrix) #feature vector containing the number of peaks for #each taxel of", "= 0.05 #threshold for peak detection #create the feature vector featurevector = BrailleHandler.countPeaks(datavector,peakTh)", "file into a vector def oldconvert2vector(data,nrows,ncols): #first convert to 3D matrix datamat =", "= np.vstack((trainingData,featurevector)) else: trainingData = featurevector return trainingData #------------------------------------------------------------------------------- #Braille Recognition Class class", "loadFile(filepath): if os.path.isfile(filepath): #return the data contained in the data return np.loadtxt(filepath) else:", "#--------------------------------------------------------------------------- #save a new SVM model def save(self,filename): #saving joblib.dump(self.modelSVM,filename+'.pkl') #--------------------------------------------------------------------------- #--------------------------------------------------------------------------- #train", "recognize braille characters. 
# this class abstracts the process of data processing, meaning", "the number of peaks in the signal #and built the feature vector #find", "else: return False #file not found #--------------------------------------------------------------------------- #--------------------------------------------------------------------------- #save a new SVM model", "the tactile sensor featureVector = np.zeros(nrows*ncols) #matrix M*NxT where each row corresponds to", "rows ncols = inputMatrix.shape[1] #number of columns nsamples = inputMatrix.shape[2] #number of samples", "a single taxel signal tactileSignal[counter] = inputMatrix[k,w,:] #count the number of peaks in", "single taxel else: #find the total number of taxels in the tactile array", "a pre-trained SVM model from a file def load(self,filepath): #checks if the file", "SINGAPORE INSTITUTE FOR NEUROTECHNOLOGY - SINAPSE # Singapore # URL: http://www.sinapseinstitute.org #------------------------------------------------------------------------------- #", "return True #load ok else: return False #file not found #--------------------------------------------------------------------------- #--------------------------------------------------------------------------- #save", "on the input features svmResp = self.modelSVM.predict(features) #return the output of the classifier", "BrailleHandler.convert2vector(datafile) #if data should be filtered if filt == True: #for every taxel", "a SVM model to classify the data if self.modelSVM is not None: #classify", "#scan all the columns for z in range(numsamples): #filtering the signal sample by", "the data def loadFile(filepath): if os.path.isfile(filepath): #return the data contained in the data", "return np.transpose(data) #convert the data from a file into a vector def oldconvert2vector(data,nrows,ncols):", "the tactile array numberTaxels = len(inputMatrix) #feature vector containing the number of peaks", "all rows for one column) #into a 3D array (row,col,frame) def oldconvert2frames(data,nrows,ncols): datamat", "'tmppeaks' featureVector[counter] = len(tmppeaks) #increment the counter counter+=1 #list of list, every element", "for i in range(np.size(datavector,0)): mva = MovingAverage() #window size = 10, sampfreq =", "def createTrainingData(dataFiles,nrows,ncols,filt=False): for k in range(len(dataFiles)): #get the filename filename = dataFiles[k] #load", "mva.getSample(tactileVector[k,z]) counter+=1 #increment the taxel counter #with the filtered data, count peaks again", "# Description: defines classes for processing tactile data to be used for #", "else: trainingData = featurevector return trainingData #------------------------------------------------------------------------------- #Braille Recognition Class class Braille(): def", "* #import the detect_peaks method #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- #Feature extraction for SVM-based braille classification", "rows for k in range(nrows): #loop through the columns for w in range(ncols):", "contains reading from all rows for one column) #into a 3D array (row,col,frame)" ]
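
#-------------------------------------------------------------------------------
#A minimal usage sketch, not part of the original file: it exercises the
#Braille train/classify round trip on synthetic peak-count features instead
#of real tactile recordings. The shapes and label values (16 taxels, two
#classes) are assumptions for illustration only.
def _braille_usage_sketch():
    rng = np.random.RandomState(0)
    #two fake feature vectors: peak counts for a 4x4 taxel array
    trainingData = np.vstack((rng.randint(0, 5, 16),
                              rng.randint(5, 10, 16)))
    labels = [0, 1] #hypothetical labels for two braille characters
    recognizer = Braille()
    recognizer.train(trainingData, labels)
    #classify the first sample again; predict expects a 2D array
    print(recognizer.classify(trainingData[0:1]))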
[ "import argparse import matplotlib matplotlib.use('pdf') import matplotlib.pyplot as plt import os import json", "= os.path.join(args.dir, 'plots') if not os.path.exists(save_dir): os.makedirs(save_dir) plt.savefig(os.path.join(save_dir, f'property_{args.property}_{args.sub_dir}.pdf')) if __name__ == '__main__':", "in os.listdir(os.path.join(args.dir, dir)): # find the right directory in level if 'properties' in", "plt import os import json import numpy as np from platforms.platform import get_platform", "break # elif args.layers < 0 and i < len(generator) + args.layers: #", "(i + 1) not in layers: continue prop_l = np.array([v[layer] for k, v", "continue prop_l = np.array([v[layer] for k, v in property.items()]) prop_rand_l = np.array([v[layer] for", "sparsity.append((sparse_dict['unpruned'] / sparse_dict['total']) * 100) for subdir in os.listdir(os.path.join(args.dir, dir)): # find the", "for dir in os.listdir(args.dir): # level directories if not 'level' in dir: continue", "len(generator) + args.layers: # continue if (i + 1) not in layers: continue", "in subdir: path = os.path.join(args.dir, dir, subdir, f'properties_{args.property}.log') if args.property in path and", "[] for dir in os.listdir(args.dir): # level directories if not 'level' in dir:", "dict = json.load(f) property[int(dir.split('_')[1])] = dict['lth'] rand_property[int(dir.split('_')[1])] = dict['random'] sparsity = np.array(sparsity) keys", "os.makedirs(save_dir) plt.savefig(os.path.join(save_dir, f'property_{args.property}_{args.sub_dir}.pdf')) if __name__ == '__main__': # Arguement Parser parser = argparse.ArgumentParser()", "parser = argparse.ArgumentParser() parser.add_argument(\"--dir\", type=str, help=\"Directory\") parser.add_argument(\"--property\", type=str, help=\"property\") parser.add_argument(\"--sub_dir\", type=str, help=\"property directory", "as np from platforms.platform import get_platform import seaborn as sns def main(args): property=", "os.listdir(os.path.join(args.dir, dir)): # find the right directory in level if 'properties' in subdir:", "= np.argsort(np.array(list(rand_property.keys()))) colors_list = ['blue', 'green', 'magenta', 'red', 'brown', 'cyan', 'purple', 'grey', 'orange',", "/ sparse_dict['total']) * 100) for subdir in os.listdir(os.path.join(args.dir, dir)): # find the right", "else range(len(rand_property[0])) layers = [range(args.layers[0])] if len(args.layers) == 1 else args.layers for i,", "__name__ == '__main__': # Arguement Parser parser = argparse.ArgumentParser() parser.add_argument(\"--dir\", type=str, help=\"Directory\") parser.add_argument(\"--property\",", "sparsity = np.array(sparsity) keys = np.array(list(rand_property.keys())) id = np.argsort(np.array(list(rand_property.keys()))) colors_list = ['blue', 'green',", "as plt import os import json import numpy as np from platforms.platform import", "# level directories if not 'level' in dir: continue with open(os.path.join(args.dir, dir, 'main',", "directory number\") parser.add_argument(\"--layers\", nargs='+', type=int, help=\"number of layers to plot\") args = parser.parse_args()", "'weight' in args.property else range(len(rand_property[0])) layers = [range(args.layers[0])] if len(args.layers) == 1 else", "args.sub_dir in path: with open(path, 'rb') as f: dict = json.load(f) property[int(dir.split('_')[1])] =", "dict['random'] sparsity = np.array(sparsity) keys = np.array(list(rand_property.keys())) id = np.argsort(np.array(list(rand_property.keys()))) colors_list = ['blue',", "elif args.layers < 0 and i < len(generator) 
+ args.layers: # continue if", "import get_platform import seaborn as sns def main(args): property= {} rand_property = {}", "= [] for dir in os.listdir(args.dir): # level directories if not 'level' in", "json.load(f_sparse) sparsity.append((sparse_dict['unpruned'] / sparse_dict['total']) * 100) for subdir in os.listdir(os.path.join(args.dir, dir)): # find", "in args.property else range(len(rand_property[0])) layers = [range(args.layers[0])] if len(args.layers) == 1 else args.layers", "if __name__ == '__main__': # Arguement Parser parser = argparse.ArgumentParser() parser.add_argument(\"--dir\", type=str, help=\"Directory\")", "np from platforms.platform import get_platform import seaborn as sns def main(args): property= {}", "rand_property[int(dir.split('_')[1])] = dict['random'] sparsity = np.array(sparsity) keys = np.array(list(rand_property.keys())) id = np.argsort(np.array(list(rand_property.keys()))) colors_list", "os import json import numpy as np from platforms.platform import get_platform import seaborn", "+ 1) not in layers: continue prop_l = np.array([v[layer] for k, v in", "for k, v in rand_property.items()]) plt.plot(sparsity[id], prop_l[id], 'o', label=f'lth layer{i+1}', color=colors_list[i]) plt.plot(sparsity[id], prop_rand_l[id],", "for k, v in property.items()]) prop_rand_l = np.array([v[layer] for k, v in rand_property.items()])", "parser.add_argument(\"--sub_dir\", type=str, help=\"property directory number\") parser.add_argument(\"--layers\", nargs='+', type=int, help=\"number of layers to plot\")", "matplotlib.use('pdf') import matplotlib.pyplot as plt import os import json import numpy as np", "os.path.join(args.dir, 'plots') if not os.path.exists(save_dir): os.makedirs(save_dir) plt.savefig(os.path.join(save_dir, f'property_{args.property}_{args.sub_dir}.pdf')) if __name__ == '__main__': #", "matplotlib matplotlib.use('pdf') import matplotlib.pyplot as plt import os import json import numpy as", "args.layers: # continue if (i + 1) not in layers: continue prop_l =", "path and args.sub_dir in path: with open(path, 'rb') as f: dict = json.load(f)", "and i >= args.layers: # break # elif args.layers < 0 and i", "if 'properties' in subdir: path = os.path.join(args.dir, dir, subdir, f'properties_{args.property}.log') if args.property in", "args.layers < 0 and i < len(generator) + args.layers: # continue if (i", "# find the right directory in level if 'properties' in subdir: path =", "colors_list = ['blue', 'green', 'magenta', 'red', 'brown', 'cyan', 'purple', 'grey', 'orange', 'pink', 'lime']", "main(args): property= {} rand_property = {} sparsity = [] for dir in os.listdir(args.dir):", "1 else args.layers for i, layer in enumerate(generator): # if args.layers > 0", "in os.listdir(args.dir): # level directories if not 'level' in dir: continue with open(os.path.join(args.dir,", "if args.layers > 0 and i >= args.layers: # break # elif args.layers", "Parser parser = argparse.ArgumentParser() parser.add_argument(\"--dir\", type=str, help=\"Directory\") parser.add_argument(\"--property\", type=str, help=\"property\") parser.add_argument(\"--sub_dir\", type=str, help=\"property", ">= args.layers: # break # elif args.layers < 0 and i < len(generator)", "subdir, f'properties_{args.property}.log') if args.property in path and args.sub_dir in path: with open(path, 'rb')", "help=\"property\") parser.add_argument(\"--sub_dir\", type=str, help=\"property directory number\") parser.add_argument(\"--layers\", nargs='+', type=int, help=\"number of layers to", "rand_property[0].keys() if 
'weight' in args.property else range(len(rand_property[0])) layers = [range(args.layers[0])] if len(args.layers) ==", "{} sparsity = [] for dir in os.listdir(args.dir): # level directories if not", "in layers: continue prop_l = np.array([v[layer] for k, v in property.items()]) prop_rand_l =", "find the right directory in level if 'properties' in subdir: path = os.path.join(args.dir,", "'red', 'brown', 'cyan', 'purple', 'grey', 'orange', 'pink', 'lime'] generator = rand_property[0].keys() if 'weight'", "os.path.join(args.dir, dir, subdir, f'properties_{args.property}.log') if args.property in path and args.sub_dir in path: with", "for i, layer in enumerate(generator): # if args.layers > 0 and i >=", "np.array([v[layer] for k, v in property.items()]) prop_rand_l = np.array([v[layer] for k, v in", "in path: with open(path, 'rb') as f: dict = json.load(f) property[int(dir.split('_')[1])] = dict['lth']", "subdir in os.listdir(os.path.join(args.dir, dir)): # find the right directory in level if 'properties'", "open(os.path.join(args.dir, dir, 'main', 'sparsity_report.json'), 'rb') as f_sparse: sparse_dict = json.load(f_sparse) sparsity.append((sparse_dict['unpruned'] / sparse_dict['total'])", "f'properties_{args.property}.log') if args.property in path and args.sub_dir in path: with open(path, 'rb') as", "'properties' in subdir: path = os.path.join(args.dir, dir, subdir, f'properties_{args.property}.log') if args.property in path", "figure save_dir = os.path.join(args.dir, 'plots') if not os.path.exists(save_dir): os.makedirs(save_dir) plt.savefig(os.path.join(save_dir, f'property_{args.property}_{args.sub_dir}.pdf')) if __name__", "= os.path.join(args.dir, dir, subdir, f'properties_{args.property}.log') if args.property in path and args.sub_dir in path:", "'sparsity_report.json'), 'rb') as f_sparse: sparse_dict = json.load(f_sparse) sparsity.append((sparse_dict['unpruned'] / sparse_dict['total']) * 100) for", "'main', 'sparsity_report.json'), 'rb') as f_sparse: sparse_dict = json.load(f_sparse) sparsity.append((sparse_dict['unpruned'] / sparse_dict['total']) * 100)", "dir, subdir, f'properties_{args.property}.log') if args.property in path and args.sub_dir in path: with open(path,", "= json.load(f) property[int(dir.split('_')[1])] = dict['lth'] rand_property[int(dir.split('_')[1])] = dict['random'] sparsity = np.array(sparsity) keys =", "import json import numpy as np from platforms.platform import get_platform import seaborn as", "os.path.exists(save_dir): os.makedirs(save_dir) plt.savefig(os.path.join(save_dir, f'property_{args.property}_{args.sub_dir}.pdf')) if __name__ == '__main__': # Arguement Parser parser =", "in path and args.sub_dir in path: with open(path, 'rb') as f: dict =", "layers = [range(args.layers[0])] if len(args.layers) == 1 else args.layers for i, layer in", "import numpy as np from platforms.platform import get_platform import seaborn as sns def", "import matplotlib matplotlib.use('pdf') import matplotlib.pyplot as plt import os import json import numpy", "= np.array([v[layer] for k, v in property.items()]) prop_rand_l = np.array([v[layer] for k, v", "type=str, help=\"Directory\") parser.add_argument(\"--property\", type=str, help=\"property\") parser.add_argument(\"--sub_dir\", type=str, help=\"property directory number\") parser.add_argument(\"--layers\", nargs='+', type=int,", "continue if (i + 1) not in layers: continue prop_l = np.array([v[layer] for", "'green', 'magenta', 'red', 'brown', 'cyan', 'purple', 'grey', 'orange', 'pink', 'lime'] generator = 
rand_property[0].keys()", "label=f'lth layer{i+1}', color=colors_list[i]) plt.plot(sparsity[id], prop_rand_l[id], 'x', label=f'random layer{i+1}', color=colors_list[i]) plt.yscale('log') plt.legend() # save", "id = np.argsort(np.array(list(rand_property.keys()))) colors_list = ['blue', 'green', 'magenta', 'red', 'brown', 'cyan', 'purple', 'grey',", "k, v in rand_property.items()]) plt.plot(sparsity[id], prop_l[id], 'o', label=f'lth layer{i+1}', color=colors_list[i]) plt.plot(sparsity[id], prop_rand_l[id], 'x',", "f_sparse: sparse_dict = json.load(f_sparse) sparsity.append((sparse_dict['unpruned'] / sparse_dict['total']) * 100) for subdir in os.listdir(os.path.join(args.dir,", "Arguement Parser parser = argparse.ArgumentParser() parser.add_argument(\"--dir\", type=str, help=\"Directory\") parser.add_argument(\"--property\", type=str, help=\"property\") parser.add_argument(\"--sub_dir\", type=str,", "help=\"property directory number\") parser.add_argument(\"--layers\", nargs='+', type=int, help=\"number of layers to plot\") args =", "sns def main(args): property= {} rand_property = {} sparsity = [] for dir", "np.array(sparsity) keys = np.array(list(rand_property.keys())) id = np.argsort(np.array(list(rand_property.keys()))) colors_list = ['blue', 'green', 'magenta', 'red',", "i, layer in enumerate(generator): # if args.layers > 0 and i >= args.layers:", "layer in enumerate(generator): # if args.layers > 0 and i >= args.layers: #", "not in layers: continue prop_l = np.array([v[layer] for k, v in property.items()]) prop_rand_l", "if not os.path.exists(save_dir): os.makedirs(save_dir) plt.savefig(os.path.join(save_dir, f'property_{args.property}_{args.sub_dir}.pdf')) if __name__ == '__main__': # Arguement Parser", "dir: continue with open(os.path.join(args.dir, dir, 'main', 'sparsity_report.json'), 'rb') as f_sparse: sparse_dict = json.load(f_sparse)", "# elif args.layers < 0 and i < len(generator) + args.layers: # continue", "for subdir in os.listdir(os.path.join(args.dir, dir)): # find the right directory in level if", "in enumerate(generator): # if args.layers > 0 and i >= args.layers: # break", "= np.array(sparsity) keys = np.array(list(rand_property.keys())) id = np.argsort(np.array(list(rand_property.keys()))) colors_list = ['blue', 'green', 'magenta',", "'grey', 'orange', 'pink', 'lime'] generator = rand_property[0].keys() if 'weight' in args.property else range(len(rand_property[0]))", "'x', label=f'random layer{i+1}', color=colors_list[i]) plt.yscale('log') plt.legend() # save figure save_dir = os.path.join(args.dir, 'plots')", "from platforms.platform import get_platform import seaborn as sns def main(args): property= {} rand_property", "property[int(dir.split('_')[1])] = dict['lth'] rand_property[int(dir.split('_')[1])] = dict['random'] sparsity = np.array(sparsity) keys = np.array(list(rand_property.keys())) id", "'brown', 'cyan', 'purple', 'grey', 'orange', 'pink', 'lime'] generator = rand_property[0].keys() if 'weight' in", "np.array([v[layer] for k, v in rand_property.items()]) plt.plot(sparsity[id], prop_l[id], 'o', label=f'lth layer{i+1}', color=colors_list[i]) plt.plot(sparsity[id],", "label=f'random layer{i+1}', color=colors_list[i]) plt.yscale('log') plt.legend() # save figure save_dir = os.path.join(args.dir, 'plots') if", "parser.add_argument(\"--property\", type=str, help=\"property\") parser.add_argument(\"--sub_dir\", type=str, help=\"property directory number\") parser.add_argument(\"--layers\", nargs='+', type=int, help=\"number of", "number\") 
parser.add_argument(\"--layers\", nargs='+', type=int, help=\"number of layers to plot\") args = parser.parse_args() main(args)", "'purple', 'grey', 'orange', 'pink', 'lime'] generator = rand_property[0].keys() if 'weight' in args.property else", "'orange', 'pink', 'lime'] generator = rand_property[0].keys() if 'weight' in args.property else range(len(rand_property[0])) layers", "os.listdir(args.dir): # level directories if not 'level' in dir: continue with open(os.path.join(args.dir, dir,", "directories if not 'level' in dir: continue with open(os.path.join(args.dir, dir, 'main', 'sparsity_report.json'), 'rb')", "def main(args): property= {} rand_property = {} sparsity = [] for dir in", "color=colors_list[i]) plt.yscale('log') plt.legend() # save figure save_dir = os.path.join(args.dir, 'plots') if not os.path.exists(save_dir):", "i < len(generator) + args.layers: # continue if (i + 1) not in", "property= {} rand_property = {} sparsity = [] for dir in os.listdir(args.dir): #", "in level if 'properties' in subdir: path = os.path.join(args.dir, dir, subdir, f'properties_{args.property}.log') if", "open(path, 'rb') as f: dict = json.load(f) property[int(dir.split('_')[1])] = dict['lth'] rand_property[int(dir.split('_')[1])] = dict['random']", "get_platform import seaborn as sns def main(args): property= {} rand_property = {} sparsity", "= json.load(f_sparse) sparsity.append((sparse_dict['unpruned'] / sparse_dict['total']) * 100) for subdir in os.listdir(os.path.join(args.dir, dir)): #", "# continue if (i + 1) not in layers: continue prop_l = np.array([v[layer]", "import matplotlib.pyplot as plt import os import json import numpy as np from", "# if args.layers > 0 and i >= args.layers: # break # elif", "v in rand_property.items()]) plt.plot(sparsity[id], prop_l[id], 'o', label=f'lth layer{i+1}', color=colors_list[i]) plt.plot(sparsity[id], prop_rand_l[id], 'x', label=f'random", "= np.array(list(rand_property.keys())) id = np.argsort(np.array(list(rand_property.keys()))) colors_list = ['blue', 'green', 'magenta', 'red', 'brown', 'cyan',", "parser.add_argument(\"--dir\", type=str, help=\"Directory\") parser.add_argument(\"--property\", type=str, help=\"property\") parser.add_argument(\"--sub_dir\", type=str, help=\"property directory number\") parser.add_argument(\"--layers\", nargs='+',", "0 and i < len(generator) + args.layers: # continue if (i + 1)", "rand_property.items()]) plt.plot(sparsity[id], prop_l[id], 'o', label=f'lth layer{i+1}', color=colors_list[i]) plt.plot(sparsity[id], prop_rand_l[id], 'x', label=f'random layer{i+1}', color=colors_list[i])", "color=colors_list[i]) plt.plot(sparsity[id], prop_rand_l[id], 'x', label=f'random layer{i+1}', color=colors_list[i]) plt.yscale('log') plt.legend() # save figure save_dir", "not os.path.exists(save_dir): os.makedirs(save_dir) plt.savefig(os.path.join(save_dir, f'property_{args.property}_{args.sub_dir}.pdf')) if __name__ == '__main__': # Arguement Parser parser", "numpy as np from platforms.platform import get_platform import seaborn as sns def main(args):", "if len(args.layers) == 1 else args.layers for i, layer in enumerate(generator): # if", "prop_l[id], 'o', label=f'lth layer{i+1}', color=colors_list[i]) plt.plot(sparsity[id], prop_rand_l[id], 'x', label=f'random layer{i+1}', color=colors_list[i]) plt.yscale('log') plt.legend()", "prop_l = np.array([v[layer] for k, v in property.items()]) prop_rand_l = np.array([v[layer] for k,", "rand_property = {} sparsity = [] for dir in os.listdir(args.dir): # level directories", 
"plt.plot(sparsity[id], prop_rand_l[id], 'x', label=f'random layer{i+1}', color=colors_list[i]) plt.yscale('log') plt.legend() # save figure save_dir =", "np.array(list(rand_property.keys())) id = np.argsort(np.array(list(rand_property.keys()))) colors_list = ['blue', 'green', 'magenta', 'red', 'brown', 'cyan', 'purple',", "with open(path, 'rb') as f: dict = json.load(f) property[int(dir.split('_')[1])] = dict['lth'] rand_property[int(dir.split('_')[1])] =", "v in property.items()]) prop_rand_l = np.array([v[layer] for k, v in rand_property.items()]) plt.plot(sparsity[id], prop_l[id],", "plt.yscale('log') plt.legend() # save figure save_dir = os.path.join(args.dir, 'plots') if not os.path.exists(save_dir): os.makedirs(save_dir)", "type=str, help=\"property\") parser.add_argument(\"--sub_dir\", type=str, help=\"property directory number\") parser.add_argument(\"--layers\", nargs='+', type=int, help=\"number of layers", "type=str, help=\"property directory number\") parser.add_argument(\"--layers\", nargs='+', type=int, help=\"number of layers to plot\") args", "level directories if not 'level' in dir: continue with open(os.path.join(args.dir, dir, 'main', 'sparsity_report.json'),", "if 'weight' in args.property else range(len(rand_property[0])) layers = [range(args.layers[0])] if len(args.layers) == 1", "dir)): # find the right directory in level if 'properties' in subdir: path", "keys = np.array(list(rand_property.keys())) id = np.argsort(np.array(list(rand_property.keys()))) colors_list = ['blue', 'green', 'magenta', 'red', 'brown',", "sparse_dict['total']) * 100) for subdir in os.listdir(os.path.join(args.dir, dir)): # find the right directory", "the right directory in level if 'properties' in subdir: path = os.path.join(args.dir, dir,", "property.items()]) prop_rand_l = np.array([v[layer] for k, v in rand_property.items()]) plt.plot(sparsity[id], prop_l[id], 'o', label=f'lth", "matplotlib.pyplot as plt import os import json import numpy as np from platforms.platform", "json.load(f) property[int(dir.split('_')[1])] = dict['lth'] rand_property[int(dir.split('_')[1])] = dict['random'] sparsity = np.array(sparsity) keys = np.array(list(rand_property.keys()))", "i >= args.layers: # break # elif args.layers < 0 and i <", "'level' in dir: continue with open(os.path.join(args.dir, dir, 'main', 'sparsity_report.json'), 'rb') as f_sparse: sparse_dict", "else args.layers for i, layer in enumerate(generator): # if args.layers > 0 and", "f: dict = json.load(f) property[int(dir.split('_')[1])] = dict['lth'] rand_property[int(dir.split('_')[1])] = dict['random'] sparsity = np.array(sparsity)", "<reponame>NogaBar/open_lth import argparse import matplotlib matplotlib.use('pdf') import matplotlib.pyplot as plt import os import", "< 0 and i < len(generator) + args.layers: # continue if (i +", "{} rand_property = {} sparsity = [] for dir in os.listdir(args.dir): # level", "= {} sparsity = [] for dir in os.listdir(args.dir): # level directories if", "['blue', 'green', 'magenta', 'red', 'brown', 'cyan', 'purple', 'grey', 'orange', 'pink', 'lime'] generator =", "= [range(args.layers[0])] if len(args.layers) == 1 else args.layers for i, layer in enumerate(generator):", "layers: continue prop_l = np.array([v[layer] for k, v in property.items()]) prop_rand_l = np.array([v[layer]", "save figure save_dir = os.path.join(args.dir, 'plots') if not os.path.exists(save_dir): os.makedirs(save_dir) plt.savefig(os.path.join(save_dir, f'property_{args.property}_{args.sub_dir}.pdf')) if", "as sns def main(args): property= 
{} rand_property = {} sparsity = [] for", "plt.savefig(os.path.join(save_dir, f'property_{args.property}_{args.sub_dir}.pdf')) if __name__ == '__main__': # Arguement Parser parser = argparse.ArgumentParser() parser.add_argument(\"--dir\",", "argparse.ArgumentParser() parser.add_argument(\"--dir\", type=str, help=\"Directory\") parser.add_argument(\"--property\", type=str, help=\"property\") parser.add_argument(\"--sub_dir\", type=str, help=\"property directory number\") parser.add_argument(\"--layers\",", "dir, 'main', 'sparsity_report.json'), 'rb') as f_sparse: sparse_dict = json.load(f_sparse) sparsity.append((sparse_dict['unpruned'] / sparse_dict['total']) *", "seaborn as sns def main(args): property= {} rand_property = {} sparsity = []", "in dir: continue with open(os.path.join(args.dir, dir, 'main', 'sparsity_report.json'), 'rb') as f_sparse: sparse_dict =", "args.layers > 0 and i >= args.layers: # break # elif args.layers <", "'magenta', 'red', 'brown', 'cyan', 'purple', 'grey', 'orange', 'pink', 'lime'] generator = rand_property[0].keys() if", "if (i + 1) not in layers: continue prop_l = np.array([v[layer] for k,", "plt.plot(sparsity[id], prop_l[id], 'o', label=f'lth layer{i+1}', color=colors_list[i]) plt.plot(sparsity[id], prop_rand_l[id], 'x', label=f'random layer{i+1}', color=colors_list[i]) plt.yscale('log')", "'pink', 'lime'] generator = rand_property[0].keys() if 'weight' in args.property else range(len(rand_property[0])) layers =", "right directory in level if 'properties' in subdir: path = os.path.join(args.dir, dir, subdir,", "subdir: path = os.path.join(args.dir, dir, subdir, f'properties_{args.property}.log') if args.property in path and args.sub_dir", "layer{i+1}', color=colors_list[i]) plt.plot(sparsity[id], prop_rand_l[id], 'x', label=f'random layer{i+1}', color=colors_list[i]) plt.yscale('log') plt.legend() # save figure", "generator = rand_property[0].keys() if 'weight' in args.property else range(len(rand_property[0])) layers = [range(args.layers[0])] if", "json import numpy as np from platforms.platform import get_platform import seaborn as sns", "if args.property in path and args.sub_dir in path: with open(path, 'rb') as f:", "0 and i >= args.layers: # break # elif args.layers < 0 and", "help=\"Directory\") parser.add_argument(\"--property\", type=str, help=\"property\") parser.add_argument(\"--sub_dir\", type=str, help=\"property directory number\") parser.add_argument(\"--layers\", nargs='+', type=int, help=\"number", "as f: dict = json.load(f) property[int(dir.split('_')[1])] = dict['lth'] rand_property[int(dir.split('_')[1])] = dict['random'] sparsity =", "= dict['random'] sparsity = np.array(sparsity) keys = np.array(list(rand_property.keys())) id = np.argsort(np.array(list(rand_property.keys()))) colors_list =", "enumerate(generator): # if args.layers > 0 and i >= args.layers: # break #", "in property.items()]) prop_rand_l = np.array([v[layer] for k, v in rand_property.items()]) plt.plot(sparsity[id], prop_l[id], 'o',", "'rb') as f_sparse: sparse_dict = json.load(f_sparse) sparsity.append((sparse_dict['unpruned'] / sparse_dict['total']) * 100) for subdir", "in rand_property.items()]) plt.plot(sparsity[id], prop_l[id], 'o', label=f'lth layer{i+1}', color=colors_list[i]) plt.plot(sparsity[id], prop_rand_l[id], 'x', label=f'random layer{i+1}',", "args.layers for i, layer in enumerate(generator): # if args.layers > 0 and i", "f'property_{args.property}_{args.sub_dir}.pdf')) if __name__ == '__main__': # Arguement Parser parser = argparse.ArgumentParser() 
parser.add_argument(\"--dir\", type=str,", "+ args.layers: # continue if (i + 1) not in layers: continue prop_l", "k, v in property.items()]) prop_rand_l = np.array([v[layer] for k, v in rand_property.items()]) plt.plot(sparsity[id],", "sparsity = [] for dir in os.listdir(args.dir): # level directories if not 'level'", "prop_rand_l = np.array([v[layer] for k, v in rand_property.items()]) plt.plot(sparsity[id], prop_l[id], 'o', label=f'lth layer{i+1}',", "< len(generator) + args.layers: # continue if (i + 1) not in layers:", "= np.array([v[layer] for k, v in rand_property.items()]) plt.plot(sparsity[id], prop_l[id], 'o', label=f'lth layer{i+1}', color=colors_list[i])", "plt.legend() # save figure save_dir = os.path.join(args.dir, 'plots') if not os.path.exists(save_dir): os.makedirs(save_dir) plt.savefig(os.path.join(save_dir,", "dir in os.listdir(args.dir): # level directories if not 'level' in dir: continue with", "if not 'level' in dir: continue with open(os.path.join(args.dir, dir, 'main', 'sparsity_report.json'), 'rb') as", "level if 'properties' in subdir: path = os.path.join(args.dir, dir, subdir, f'properties_{args.property}.log') if args.property", "np.argsort(np.array(list(rand_property.keys()))) colors_list = ['blue', 'green', 'magenta', 'red', 'brown', 'cyan', 'purple', 'grey', 'orange', 'pink',", "len(args.layers) == 1 else args.layers for i, layer in enumerate(generator): # if args.layers", "args.layers: # break # elif args.layers < 0 and i < len(generator) +", "save_dir = os.path.join(args.dir, 'plots') if not os.path.exists(save_dir): os.makedirs(save_dir) plt.savefig(os.path.join(save_dir, f'property_{args.property}_{args.sub_dir}.pdf')) if __name__ ==", "sparse_dict = json.load(f_sparse) sparsity.append((sparse_dict['unpruned'] / sparse_dict['total']) * 100) for subdir in os.listdir(os.path.join(args.dir, dir)):", "as f_sparse: sparse_dict = json.load(f_sparse) sparsity.append((sparse_dict['unpruned'] / sparse_dict['total']) * 100) for subdir in", "'plots') if not os.path.exists(save_dir): os.makedirs(save_dir) plt.savefig(os.path.join(save_dir, f'property_{args.property}_{args.sub_dir}.pdf')) if __name__ == '__main__': # Arguement", "# break # elif args.layers < 0 and i < len(generator) + args.layers:", "= argparse.ArgumentParser() parser.add_argument(\"--dir\", type=str, help=\"Directory\") parser.add_argument(\"--property\", type=str, help=\"property\") parser.add_argument(\"--sub_dir\", type=str, help=\"property directory number\")", "args.property in path and args.sub_dir in path: with open(path, 'rb') as f: dict", "directory in level if 'properties' in subdir: path = os.path.join(args.dir, dir, subdir, f'properties_{args.property}.log')", "and i < len(generator) + args.layers: # continue if (i + 1) not", "path = os.path.join(args.dir, dir, subdir, f'properties_{args.property}.log') if args.property in path and args.sub_dir in", "continue with open(os.path.join(args.dir, dir, 'main', 'sparsity_report.json'), 'rb') as f_sparse: sparse_dict = json.load(f_sparse) sparsity.append((sparse_dict['unpruned']", "platforms.platform import get_platform import seaborn as sns def main(args): property= {} rand_property =", "import os import json import numpy as np from platforms.platform import get_platform import", "'cyan', 'purple', 'grey', 'orange', 'pink', 'lime'] generator = rand_property[0].keys() if 'weight' in args.property", "[range(args.layers[0])] if len(args.layers) == 1 else args.layers for i, layer in enumerate(generator): #", "prop_rand_l[id], 'x', label=f'random 
layer{i+1}', color=colors_list[i]) plt.yscale('log') plt.legend() # save figure save_dir = os.path.join(args.dir,", "* 100) for subdir in os.listdir(os.path.join(args.dir, dir)): # find the right directory in", "# save figure save_dir = os.path.join(args.dir, 'plots') if not os.path.exists(save_dir): os.makedirs(save_dir) plt.savefig(os.path.join(save_dir, f'property_{args.property}_{args.sub_dir}.pdf'))", "and args.sub_dir in path: with open(path, 'rb') as f: dict = json.load(f) property[int(dir.split('_')[1])]", "= dict['lth'] rand_property[int(dir.split('_')[1])] = dict['random'] sparsity = np.array(sparsity) keys = np.array(list(rand_property.keys())) id =", "= ['blue', 'green', 'magenta', 'red', 'brown', 'cyan', 'purple', 'grey', 'orange', 'pink', 'lime'] generator", "'rb') as f: dict = json.load(f) property[int(dir.split('_')[1])] = dict['lth'] rand_property[int(dir.split('_')[1])] = dict['random'] sparsity", "= rand_property[0].keys() if 'weight' in args.property else range(len(rand_property[0])) layers = [range(args.layers[0])] if len(args.layers)", "argparse import matplotlib matplotlib.use('pdf') import matplotlib.pyplot as plt import os import json import", "'__main__': # Arguement Parser parser = argparse.ArgumentParser() parser.add_argument(\"--dir\", type=str, help=\"Directory\") parser.add_argument(\"--property\", type=str, help=\"property\")", "> 0 and i >= args.layers: # break # elif args.layers < 0", "'lime'] generator = rand_property[0].keys() if 'weight' in args.property else range(len(rand_property[0])) layers = [range(args.layers[0])]", "== '__main__': # Arguement Parser parser = argparse.ArgumentParser() parser.add_argument(\"--dir\", type=str, help=\"Directory\") parser.add_argument(\"--property\", type=str,", "with open(os.path.join(args.dir, dir, 'main', 'sparsity_report.json'), 'rb') as f_sparse: sparse_dict = json.load(f_sparse) sparsity.append((sparse_dict['unpruned'] /", "args.property else range(len(rand_property[0])) layers = [range(args.layers[0])] if len(args.layers) == 1 else args.layers for", "1) not in layers: continue prop_l = np.array([v[layer] for k, v in property.items()])", "dict['lth'] rand_property[int(dir.split('_')[1])] = dict['random'] sparsity = np.array(sparsity) keys = np.array(list(rand_property.keys())) id = np.argsort(np.array(list(rand_property.keys())))", "== 1 else args.layers for i, layer in enumerate(generator): # if args.layers >", "# Arguement Parser parser = argparse.ArgumentParser() parser.add_argument(\"--dir\", type=str, help=\"Directory\") parser.add_argument(\"--property\", type=str, help=\"property\") parser.add_argument(\"--sub_dir\",", "layer{i+1}', color=colors_list[i]) plt.yscale('log') plt.legend() # save figure save_dir = os.path.join(args.dir, 'plots') if not", "not 'level' in dir: continue with open(os.path.join(args.dir, dir, 'main', 'sparsity_report.json'), 'rb') as f_sparse:", "path: with open(path, 'rb') as f: dict = json.load(f) property[int(dir.split('_')[1])] = dict['lth'] rand_property[int(dir.split('_')[1])]", "'o', label=f'lth layer{i+1}', color=colors_list[i]) plt.plot(sparsity[id], prop_rand_l[id], 'x', label=f'random layer{i+1}', color=colors_list[i]) plt.yscale('log') plt.legend() #", "100) for subdir in os.listdir(os.path.join(args.dir, dir)): # find the right directory in level", "import seaborn as sns def main(args): property= {} rand_property = {} sparsity =", "range(len(rand_property[0])) layers = [range(args.layers[0])] if len(args.layers) == 1 else args.layers for i, layer" ]
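
#A hypothetical invocation, for illustration only; the script name and result
#directory below are assumptions, not taken from the original repository:
#
#   python plot_property.py --dir results/lottery_experiment --property weight \
#       --sub_dir 0 --layers 3
#
#For each level_* directory under --dir this expects main/sparsity_report.json
#with 'unpruned' and 'total' counts, plus a properties_weight.log JSON file
#whose 'lth' and 'random' entries hold per-layer values; the log-scale plot of
#property versus sparsity is then written to <--dir>/plots/property_weight_0.pdf.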
[ "= StringVar() self.MaxFeatures_Float.set('0.1') self.MaxFeatures_None = ttk.Radiobutton(frame, text='None', variable=self.MaxFeatures, value='none') self.MaxFeatures_Integer_Button = ttk.Radiobutton(frame, text='Number:',", "self.LearningRate = StringVar() self.LearningRate.set('0.1') self.LearningRate_Box = Spinbox(frame, textvariable=self.LearningRate, from_=0.0, to=1.0, increment=0.01, width=5) #Options", "text='Min Samples to Split:') self.MinSamplesSplit = StringVar() self.MinSamplesSplit.set('2') self.MinSamplesSplit_Box = ttk.Entry(frame, textvariable=self.MinSamplesSplit, width=7)", "verbosity self.Verbose_Label = ttk.Label(frame, text='Verbose Level:') self.Verbose = StringVar() self.Verbose.set('0') self.Verbose_Box = ttk.Entry(frame,", "self.MaxFeatures_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxFeatures, value='integer') self.MaxFeatures_Integer_Box = ttk.Entry(frame, textvariable=self.MaxFeatures_Integer, width=7) self.MaxFeatures_Float_Button =", "for the loss criteria. self.Loss_Label = ttk.Label(frame, text=\"Loss Function:\") self.Loss = StringVar() self.Loss.set('deviance')", "sticky=(W)) self.MaxFeatures_Auto.grid(column=0, row=12, sticky=(W)) self.MaxFeatures_Log2.grid(column=0, row=12, sticky=(W)) self.MaxLeafNodes_Label.grid(column=0, row=13, sticky=(W)) self.MaxLeafNodes_None.grid(column=0, row=14, sticky=(W))", "for the minimum number of samples before an internal node is split. self.MinSamplesSplit_Label", "* from tkinter import ttk class Gradient_Boosting_Classifier(Algorithm): def __init__(self, frame): self.frame = frame", "self.MaxLeafNodes_Integer_Box = ttk.Entry(frame, textvariable=self.MaxLeafNodes_Integer, width=7) #Options for verbosity self.Verbose_Label = ttk.Label(frame, text='Verbose Level:')", "self.MinSamplesSplit_Box.grid(column=2, row=4, sticky=(W)) self.MinSamplesLeaf_Label.grid(column=0, columnspan=2, row=5, sticky=(W)) self.MinSamplesLeaf_Box.grid(column=2, row=5, sticky=(W)) self.MinFractionLeaf_Label.grid(column=0, columnspan=2, row=6,", "self.MaxFeatures_Auto.grid(column=0, row=12, sticky=(W)) self.MaxFeatures_Log2.grid(column=0, row=12, sticky=(W)) self.MaxLeafNodes_Label.grid(column=0, row=13, sticky=(W)) self.MaxLeafNodes_None.grid(column=0, row=14, sticky=(W)) self.MaxLeafNodes_Integer_Button.grid(column=0,", "sticky=(W)) self.LearningRate_Box.grid(column=1, row=1, sticky=(W)) self.Estimators_Label.grid(column=0, row=2, sticky=(W)) self.Estimators_Box.grid(column=1, row=2, sticky=(W)) self.MaxDepth_Label.grid(column=0, row=3, sticky=(W))", "to Split:') self.MinSamplesSplit = StringVar() self.MinSamplesSplit.set('2') self.MinSamplesSplit_Box = ttk.Entry(frame, textvariable=self.MinSamplesSplit, width=7) #Options for", "= ttk.Radiobutton(frame, text='Number:', variable=self.MaxFeatures, value='integer') self.MaxFeatures_Integer_Box = ttk.Entry(frame, textvariable=self.MaxFeatures_Integer, width=7) self.MaxFeatures_Float_Button = ttk.Radiobutton(frame,", "text='Percentage:', variable=self.MaxFeatures, value='float') self.MaxFeatures_Float_Box = Spinbox(frame, from_=0.0, to=1.0, textvariable=self.MaxFeatures_Float, width=5, increment=0.01) self.MaxFeatures_Auto =", "sticky=(W)) self.MaxFeatures_Integer_Box.grid(column=1, row=10, sticky=(W)) self.MaxFeatures_Float_Button.grid(column=0, row=11, sticky=(W)) self.MaxFeatures_Float_Box.grid(column=1, row=11, sticky=(W)) self.MaxFeatures_Auto.grid(column=0, row=12, sticky=(W))", "= ttk.Radiobutton(frame, text='Number:', variable=self.MaxLeafNodes, value='integer') 
self.MaxLeafNodes_Integer_Box = ttk.Entry(frame, textvariable=self.MaxLeafNodes_Integer, width=7) #Options for verbosity", "= ttk.Entry(frame, textvariable=self.MaxDepth, width=7) #Options for the minimum number of samples before an", "= \"Gradient Boosting Classifier\" #Options for the loss criteria. self.Loss_Label = ttk.Label(frame, text=\"Loss", "width=5, increment=0.01) self.MaxFeatures_Auto = ttk.Radiobutton(frame, text='Auto', variable=self.MaxFeatures, value='auto') self.MaxFeatures_Log2 = ttk.Radiobutton(frame, text='Log2', variable=self.MaxFeatures,", "sticky=(W)) self.MaxFeatures_Integer_Button.grid(column=0, row=10, sticky=(W)) self.MaxFeatures_Integer_Box.grid(column=1, row=10, sticky=(W)) self.MaxFeatures_Float_Button.grid(column=0, row=11, sticky=(W)) self.MaxFeatures_Float_Box.grid(column=1, row=11, sticky=(W))", "from algorithm import Algorithm from tkinter import * from tkinter import ttk class", "to=1.0, increment=0.01, textvariable=self.Subsample, width=5) #Options for max features. self.MaxFeatures_Label = ttk.Label(frame, text='Max Features:')", "the max depth self.MaxDepth_Label = ttk.Label(frame, text='Max Depth:') self.MaxDepth = StringVar() self.MaxDepth.set('0') self.MaxDepth_Box", "row=10, sticky=(W)) self.MaxFeatures_Float_Button.grid(column=0, row=11, sticky=(W)) self.MaxFeatures_Float_Box.grid(column=1, row=11, sticky=(W)) self.MaxFeatures_Auto.grid(column=0, row=12, sticky=(W)) self.MaxFeatures_Log2.grid(column=0, row=12,", "StringVar() self.LearningRate.set('0.1') self.LearningRate_Box = Spinbox(frame, textvariable=self.LearningRate, from_=0.0, to=1.0, increment=0.01, width=5) #Options for the", "self.LearningRate.set('0.1') self.LearningRate_Box = Spinbox(frame, textvariable=self.LearningRate, from_=0.0, to=1.0, increment=0.01, width=5) #Options for the number", "= ttk.Label(frame, text='Max Leaf Nodes:') self.MaxLeafNodes = StringVar() self.MaxLeafNodes.set('none') self.MaxLeafNodes_None = ttk.Radiobutton(frame, text='None',", "ttk.Label(frame, text=\"Loss Function:\") self.Loss = StringVar() self.Loss.set('deviance') self.Loss_Deviance = ttk.Radiobutton(frame, text='Deviance', variable=self.Loss, value='deviance')", "self.Estimators.set('100') self.Estimators_Box = ttk.Entry(frame, textvariable=self.Estimators, width=7) #Options for the max depth self.MaxDepth_Label =", "for the minimum number of leaf nodes self.MinSamplesLeaf_Label = ttk.Label(frame, text='Min # of", "text='Log2', variable=self.MaxFeatures, value='log2') #Options for the max # of leaf nodes self.MaxLeafNodes_Label =", "self.LearningRate_Box.grid(column=1, row=1, sticky=(W)) self.Estimators_Label.grid(column=0, row=2, sticky=(W)) self.Estimators_Box.grid(column=1, row=2, sticky=(W)) self.MaxDepth_Label.grid(column=0, row=3, sticky=(W)) self.MaxDepth_Box.grid(column=1,", "row=4, sticky=(W)) self.MinSamplesLeaf_Label.grid(column=0, columnspan=2, row=5, sticky=(W)) self.MinSamplesLeaf_Box.grid(column=2, row=5, sticky=(W)) self.MinFractionLeaf_Label.grid(column=0, columnspan=2, row=6, sticky=(W))", "Nodes:') self.MinFractionLeaf = StringVar() self.MinFractionLeaf.set('0.0') self.MinFractionLeaf_Box = ttk.Entry(frame, textvariable=self.MinFractionLeaf, width=7) #Options for batch", "frame self.name = \"Gradient Boosting Classifier\" #Options for the loss criteria. 
self.Loss_Label =", "StringVar() self.MaxLeafNodes.set('none') self.MaxLeafNodes_None = ttk.Radiobutton(frame, text='None', variable=self.MaxLeafNodes, value='none') self.MaxLeafNodes_Integer = StringVar() self.MaxLeafNodes_Integer.set('0') self.MaxLeafNodes_Integer_Button", "StringVar() self.MinSamplesLeaf.set('1') self.MinSamplesLeaf_Box = ttk.Entry(frame, textvariable=self.MinSamplesLeaf, width=7) #Options for the minimum fraction of", "self.MaxLeafNodes_None.grid(column=0, row=14, sticky=(W)) self.MaxLeafNodes_Integer_Button.grid(column=0, row=15, sticky=(W)) self.MaxLeafNodes_Integer_Box.grid(column=1, row=15, sticky=(W)) self.Verbose_Label.grid(column=0, row=16, sticky=(W)) self.Verbose_Box.grid(column=1,", "minimum number of samples before an internal node is split. self.MinSamplesSplit_Label = ttk.Label(frame,", "self.MaxFeatures_None.grid(column=0, row=9, sticky=(W)) self.MaxFeatures_Integer_Button.grid(column=0, row=10, sticky=(W)) self.MaxFeatures_Integer_Box.grid(column=1, row=10, sticky=(W)) self.MaxFeatures_Float_Button.grid(column=0, row=11, sticky=(W)) self.MaxFeatures_Float_Box.grid(column=1,", "for batch size self.Subsample_Label = ttk.Label(frame, text='Batch Size:') self.Subsample = StringVar() self.Subsample.set('1.0') self.Subsample_Box", "of Stages:') self.Estimators = StringVar() self.Estimators.set('100') self.Estimators_Box = ttk.Entry(frame, textvariable=self.Estimators, width=7) #Options for", "= StringVar() self.MaxDepth.set('0') self.MaxDepth_Box = ttk.Entry(frame, textvariable=self.MaxDepth, width=7) #Options for the minimum number", "= ttk.Label(frame, text='Verbose Level:') self.Verbose = StringVar() self.Verbose.set('0') self.Verbose_Box = ttk.Entry(frame, textvariable=self.Verbose, width=7)", "variable=self.Loss, value='exponential') #Options for the learning rate. self.LearningRate_Label = ttk.Label(frame, text=\"Learning Rate:\") self.LearningRate", "fraction of leaf nodes self.MinFractionLeaf_Label = ttk.Label(frame, text='Min % of Leaf Nodes:') self.MinFractionLeaf", "for the max # of leaf nodes self.MaxLeafNodes_Label = ttk.Label(frame, text='Max Leaf Nodes:')", "= frame self.name = \"Gradient Boosting Classifier\" #Options for the loss criteria. self.Loss_Label", "row=14, sticky=(W)) self.MaxLeafNodes_Integer_Button.grid(column=0, row=15, sticky=(W)) self.MaxLeafNodes_Integer_Box.grid(column=1, row=15, sticky=(W)) self.Verbose_Label.grid(column=0, row=16, sticky=(W)) self.Verbose_Box.grid(column=1, row=16,", "textvariable=self.LearningRate, from_=0.0, to=1.0, increment=0.01, width=5) #Options for the number of boosting stages. self.Estimators_Label", "import Algorithm from tkinter import * from tkinter import ttk class Gradient_Boosting_Classifier(Algorithm): def", "self.MinFractionLeaf.set('0.0') self.MinFractionLeaf_Box = ttk.Entry(frame, textvariable=self.MinFractionLeaf, width=7) #Options for batch size self.Subsample_Label = ttk.Label(frame,", "ttk.Entry(frame, textvariable=self.MinFractionLeaf, width=7) #Options for batch size self.Subsample_Label = ttk.Label(frame, text='Batch Size:') self.Subsample", "Depth:') self.MaxDepth = StringVar() self.MaxDepth.set('0') self.MaxDepth_Box = ttk.Entry(frame, textvariable=self.MaxDepth, width=7) #Options for the", "sticky=(W)) self.MinSamplesSplit_Box.grid(column=2, row=4, sticky=(W)) self.MinSamplesLeaf_Label.grid(column=0, columnspan=2, row=5, sticky=(W)) self.MinSamplesLeaf_Box.grid(column=2, row=5, sticky=(W)) self.MinFractionLeaf_Label.grid(column=0, columnspan=2,", "of boosting stages. 
self.Estimators_Label = ttk.Label(frame, text='# of Stages:') self.Estimators = StringVar() self.Estimators.set('100')", "width=7) #Options for verbosity self.Verbose_Label = ttk.Label(frame, text='Verbose Level:') self.Verbose = StringVar() self.Verbose.set('0')", "self.MaxDepth_Label.grid(column=0, row=3, sticky=(W)) self.MaxDepth_Box.grid(column=1, row=3, sticky=(W)) self.MinSamplesSplit_Label.grid(column=0, columnspan=2, row=4, sticky=(W)) self.MinSamplesSplit_Box.grid(column=2, row=4, sticky=(W))", "row=3, sticky=(W)) self.MaxDepth_Box.grid(column=1, row=3, sticky=(W)) self.MinSamplesSplit_Label.grid(column=0, columnspan=2, row=4, sticky=(W)) self.MinSamplesSplit_Box.grid(column=2, row=4, sticky=(W)) self.MinSamplesLeaf_Label.grid(column=0,", "self.MaxLeafNodes_Label.grid(column=0, row=13, sticky=(W)) self.MaxLeafNodes_None.grid(column=0, row=14, sticky=(W)) self.MaxLeafNodes_Integer_Button.grid(column=0, row=15, sticky=(W)) self.MaxLeafNodes_Integer_Box.grid(column=1, row=15, sticky=(W)) self.Verbose_Label.grid(column=0,", "row=2, sticky=(W)) self.MaxDepth_Label.grid(column=0, row=3, sticky=(W)) self.MaxDepth_Box.grid(column=1, row=3, sticky=(W)) self.MinSamplesSplit_Label.grid(column=0, columnspan=2, row=4, sticky=(W)) self.MinSamplesSplit_Box.grid(column=2,", "variable=self.MaxFeatures, value='log2') #Options for the max # of leaf nodes self.MaxLeafNodes_Label = ttk.Label(frame,", "minimum fraction of leaf nodes self.MinFractionLeaf_Label = ttk.Label(frame, text='Min % of Leaf Nodes:')", "sticky=(W)) self.MaxFeatures_Log2.grid(column=0, row=12, sticky=(W)) self.MaxLeafNodes_Label.grid(column=0, row=13, sticky=(W)) self.MaxLeafNodes_None.grid(column=0, row=14, sticky=(W)) self.MaxLeafNodes_Integer_Button.grid(column=0, row=15, sticky=(W))", "options for the Decision Tree Classifier. self.clear_frame(self.frame) #Insert the options into the frame.", "ttk.Label(frame, text='Max Features:') self.MaxFeatures = StringVar() self.MaxFeatures.set('none') self.MaxFeatures_Integer = StringVar() self.MaxFeatures_Float = StringVar()", "self.Loss.set('deviance') self.Loss_Deviance = ttk.Radiobutton(frame, text='Deviance', variable=self.Loss, value='deviance') self.Loss_Exponential = ttk.Radiobutton(frame, text='Exponential', variable=self.Loss, value='exponential')", "#Options for batch size self.Subsample_Label = ttk.Label(frame, text='Batch Size:') self.Subsample = StringVar() self.Subsample.set('1.0')", "self.Loss_Label = ttk.Label(frame, text=\"Loss Function:\") self.Loss = StringVar() self.Loss.set('deviance') self.Loss_Deviance = ttk.Radiobutton(frame, text='Deviance',", "self.MaxFeatures_Integer = StringVar() self.MaxFeatures_Float = StringVar() self.MaxFeatures_Float.set('0.1') self.MaxFeatures_None = ttk.Radiobutton(frame, text='None', variable=self.MaxFeatures, value='none')", "stages. 
self.Estimators_Label = ttk.Label(frame, text='# of Stages:') self.Estimators = StringVar() self.Estimators.set('100') self.Estimators_Box =", "self.Loss_Deviance.grid(column=1, row=0, sticky=(W)) self.Loss_Exponential.grid(column=2, row=0, sticky=(W)) self.LearningRate_Label.grid(column=0, row=1, sticky=(W)) self.LearningRate_Box.grid(column=1, row=1, sticky=(W)) self.Estimators_Label.grid(column=0,", "textvariable=self.MaxFeatures_Float, width=5, increment=0.01) self.MaxFeatures_Auto = ttk.Radiobutton(frame, text='Auto', variable=self.MaxFeatures, value='auto') self.MaxFeatures_Log2 = ttk.Radiobutton(frame, text='Log2',", "ttk.Label(frame, text='Min Samples to Split:') self.MinSamplesSplit = StringVar() self.MinSamplesSplit.set('2') self.MinSamplesSplit_Box = ttk.Entry(frame, textvariable=self.MinSamplesSplit,", "self.MinSamplesLeaf = StringVar() self.MinSamplesLeaf.set('1') self.MinSamplesLeaf_Box = ttk.Entry(frame, textvariable=self.MinSamplesLeaf, width=7) #Options for the minimum", "width=7) #Options for the minimum number of samples before an internal node is", "Rate:\") self.LearningRate = StringVar() self.LearningRate.set('0.1') self.LearningRate_Box = Spinbox(frame, textvariable=self.LearningRate, from_=0.0, to=1.0, increment=0.01, width=5)", "sticky=(W)) self.MaxDepth_Label.grid(column=0, row=3, sticky=(W)) self.MaxDepth_Box.grid(column=1, row=3, sticky=(W)) self.MinSamplesSplit_Label.grid(column=0, columnspan=2, row=4, sticky=(W)) self.MinSamplesSplit_Box.grid(column=2, row=4,", "variable=self.MaxFeatures, value='float') self.MaxFeatures_Float_Box = Spinbox(frame, from_=0.0, to=1.0, textvariable=self.MaxFeatures_Float, width=5, increment=0.01) self.MaxFeatures_Auto = ttk.Radiobutton(frame,", "variable=self.MaxFeatures, value='auto') self.MaxFeatures_Log2 = ttk.Radiobutton(frame, text='Log2', variable=self.MaxFeatures, value='log2') #Options for the max #", "self.MinSamplesLeaf_Label.grid(column=0, columnspan=2, row=5, sticky=(W)) self.MinSamplesLeaf_Box.grid(column=2, row=5, sticky=(W)) self.MinFractionLeaf_Label.grid(column=0, columnspan=2, row=6, sticky=(W)) self.MinFractionLeaf_Box.grid(column=2, row=6,", "ttk.Radiobutton(frame, text='Number:', variable=self.MaxLeafNodes, value='integer') self.MaxLeafNodes_Integer_Box = ttk.Entry(frame, textvariable=self.MaxLeafNodes_Integer, width=7) #Options for verbosity self.Verbose_Label", "sticky=(W)) self.MaxDepth_Box.grid(column=1, row=3, sticky=(W)) self.MinSamplesSplit_Label.grid(column=0, columnspan=2, row=4, sticky=(W)) self.MinSamplesSplit_Box.grid(column=2, row=4, sticky=(W)) self.MinSamplesLeaf_Label.grid(column=0, columnspan=2,", "row=2, sticky=(W)) self.Estimators_Box.grid(column=1, row=2, sticky=(W)) self.MaxDepth_Label.grid(column=0, row=3, sticky=(W)) self.MaxDepth_Box.grid(column=1, row=3, sticky=(W)) self.MinSamplesSplit_Label.grid(column=0, columnspan=2,", "self.name = \"Gradient Boosting Classifier\" #Options for the loss criteria. 
self.Loss_Label = ttk.Label(frame,", "sticky=(W)) self.MinFractionLeaf_Label.grid(column=0, columnspan=2, row=6, sticky=(W)) self.MinFractionLeaf_Box.grid(column=2, row=6, sticky=(W)) self.Subsample_Label.grid(column=0, row=7, sticky=(W)) self.Subsample_Box.grid(column=1, row=7,", "text='None', variable=self.MaxLeafNodes, value='none') self.MaxLeafNodes_Integer = StringVar() self.MaxLeafNodes_Integer.set('0') self.MaxLeafNodes_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxLeafNodes, value='integer')", "self.clear_frame(self.frame) #Insert the options into the frame. self.Loss_Label.grid(column=0,row=0, sticky=(W)) self.Loss_Deviance.grid(column=1, row=0, sticky=(W)) self.Loss_Exponential.grid(column=2,", "ttk.Label(frame, text='Max Leaf Nodes:') self.MaxLeafNodes = StringVar() self.MaxLeafNodes.set('none') self.MaxLeafNodes_None = ttk.Radiobutton(frame, text='None', variable=self.MaxLeafNodes,", "split. self.MinSamplesSplit_Label = ttk.Label(frame, text='Min Samples to Split:') self.MinSamplesSplit = StringVar() self.MinSamplesSplit.set('2') self.MinSamplesSplit_Box", "text='Min % of Leaf Nodes:') self.MinFractionLeaf = StringVar() self.MinFractionLeaf.set('0.0') self.MinFractionLeaf_Box = ttk.Entry(frame, textvariable=self.MinFractionLeaf,", "Samples to Split:') self.MinSamplesSplit = StringVar() self.MinSamplesSplit.set('2') self.MinSamplesSplit_Box = ttk.Entry(frame, textvariable=self.MinSamplesSplit, width=7) #Options", "from_=0.0, to=1.0, increment=0.01, textvariable=self.Subsample, width=5) #Options for max features. self.MaxFeatures_Label = ttk.Label(frame, text='Max", "#Options for the minimum number of leaf nodes self.MinSamplesLeaf_Label = ttk.Label(frame, text='Min #", "ttk.Radiobutton(frame, text='Log2', variable=self.MaxFeatures, value='log2') #Options for the max # of leaf nodes self.MaxLeafNodes_Label", "row=1, sticky=(W)) self.Estimators_Label.grid(column=0, row=2, sticky=(W)) self.Estimators_Box.grid(column=1, row=2, sticky=(W)) self.MaxDepth_Label.grid(column=0, row=3, sticky=(W)) self.MaxDepth_Box.grid(column=1, row=3,", "from tkinter import * from tkinter import ttk class Gradient_Boosting_Classifier(Algorithm): def __init__(self, frame):", "text='Number:', variable=self.MaxFeatures, value='integer') self.MaxFeatures_Integer_Box = ttk.Entry(frame, textvariable=self.MaxFeatures_Integer, width=7) self.MaxFeatures_Float_Button = ttk.Radiobutton(frame, text='Percentage:', variable=self.MaxFeatures,", "self.Verbose = StringVar() self.Verbose.set('0') self.Verbose_Box = ttk.Entry(frame, textvariable=self.Verbose, width=7) def Display_Options(self): #Display options", "tkinter import ttk class Gradient_Boosting_Classifier(Algorithm): def __init__(self, frame): self.frame = frame self.name =", "= ttk.Entry(frame, textvariable=self.Estimators, width=7) #Options for the max depth self.MaxDepth_Label = ttk.Label(frame, text='Max", "= StringVar() self.Loss.set('deviance') self.Loss_Deviance = ttk.Radiobutton(frame, text='Deviance', variable=self.Loss, value='deviance') self.Loss_Exponential = ttk.Radiobutton(frame, text='Exponential',", "sticky=(W)) self.Subsample_Label.grid(column=0, row=7, sticky=(W)) self.Subsample_Box.grid(column=1, row=7, sticky=(W)) self.MaxFeatures_Label.grid(column=0, row=8, sticky=(W)) self.MaxFeatures_None.grid(column=0, row=9, sticky=(W))", "variable=self.Loss, value='deviance') self.Loss_Exponential = ttk.Radiobutton(frame, text='Exponential', variable=self.Loss, value='exponential') #Options for the learning rate.", "= 
ttk.Entry(frame, textvariable=self.MinSamplesSplit, width=7) #Options for the minimum number of leaf nodes self.MinSamplesLeaf_Label", "def Display_Options(self): #Display options for the Decision Tree Classifier. self.clear_frame(self.frame) #Insert the options", "self.LearningRate_Box = Spinbox(frame, textvariable=self.LearningRate, from_=0.0, to=1.0, increment=0.01, width=5) #Options for the number of", "= ttk.Label(frame, text=\"Learning Rate:\") self.LearningRate = StringVar() self.LearningRate.set('0.1') self.LearningRate_Box = Spinbox(frame, textvariable=self.LearningRate, from_=0.0,", "= ttk.Radiobutton(frame, text='Log2', variable=self.MaxFeatures, value='log2') #Options for the max # of leaf nodes", "width=5) #Options for the number of boosting stages. self.Estimators_Label = ttk.Label(frame, text='# of", "ttk class Gradient_Boosting_Classifier(Algorithm): def __init__(self, frame): self.frame = frame self.name = \"Gradient Boosting", "for the Decision Tree Classifier. self.clear_frame(self.frame) #Insert the options into the frame. self.Loss_Label.grid(column=0,row=0,", "leaf nodes self.MinFractionLeaf_Label = ttk.Label(frame, text='Min % of Leaf Nodes:') self.MinFractionLeaf = StringVar()", "variable=self.MaxFeatures, value='integer') self.MaxFeatures_Integer_Box = ttk.Entry(frame, textvariable=self.MaxFeatures_Integer, width=7) self.MaxFeatures_Float_Button = ttk.Radiobutton(frame, text='Percentage:', variable=self.MaxFeatures, value='float')", "sticky=(W)) self.MaxLeafNodes_Integer_Button.grid(column=0, row=15, sticky=(W)) self.MaxLeafNodes_Integer_Box.grid(column=1, row=15, sticky=(W)) self.Verbose_Label.grid(column=0, row=16, sticky=(W)) self.Verbose_Box.grid(column=1, row=16, sticky=(W))", "ttk.Radiobutton(frame, text='None', variable=self.MaxFeatures, value='none') self.MaxFeatures_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxFeatures, value='integer') self.MaxFeatures_Integer_Box = ttk.Entry(frame,", "width=7) self.MaxFeatures_Float_Button = ttk.Radiobutton(frame, text='Percentage:', variable=self.MaxFeatures, value='float') self.MaxFeatures_Float_Box = Spinbox(frame, from_=0.0, to=1.0, textvariable=self.MaxFeatures_Float,", "self.MinSamplesLeaf_Box.grid(column=2, row=5, sticky=(W)) self.MinFractionLeaf_Label.grid(column=0, columnspan=2, row=6, sticky=(W)) self.MinFractionLeaf_Box.grid(column=2, row=6, sticky=(W)) self.Subsample_Label.grid(column=0, row=7, sticky=(W))", "the max # of leaf nodes self.MaxLeafNodes_Label = ttk.Label(frame, text='Max Leaf Nodes:') self.MaxLeafNodes", "from tkinter import ttk class Gradient_Boosting_Classifier(Algorithm): def __init__(self, frame): self.frame = frame self.name", "= StringVar() self.Verbose.set('0') self.Verbose_Box = ttk.Entry(frame, textvariable=self.Verbose, width=7) def Display_Options(self): #Display options for", "self.Verbose.set('0') self.Verbose_Box = ttk.Entry(frame, textvariable=self.Verbose, width=7) def Display_Options(self): #Display options for the Decision", "row=4, sticky=(W)) self.MinSamplesSplit_Box.grid(column=2, row=4, sticky=(W)) self.MinSamplesLeaf_Label.grid(column=0, columnspan=2, row=5, sticky=(W)) self.MinSamplesLeaf_Box.grid(column=2, row=5, sticky=(W)) self.MinFractionLeaf_Label.grid(column=0,", "ttk.Label(frame, text='Batch Size:') self.Subsample = StringVar() self.Subsample.set('1.0') self.Subsample_Box = Spinbox(frame, from_=0.0, to=1.0, increment=0.01,", "self.MinFractionLeaf = StringVar() self.MinFractionLeaf.set('0.0') self.MinFractionLeaf_Box = ttk.Entry(frame, 
textvariable=self.MinFractionLeaf, width=7) #Options for batch size", "self.MaxLeafNodes_Label = ttk.Label(frame, text='Max Leaf Nodes:') self.MaxLeafNodes = StringVar() self.MaxLeafNodes.set('none') self.MaxLeafNodes_None = ttk.Radiobutton(frame,", "textvariable=self.MaxDepth, width=7) #Options for the minimum number of samples before an internal node", "self.MaxLeafNodes_None = ttk.Radiobutton(frame, text='None', variable=self.MaxLeafNodes, value='none') self.MaxLeafNodes_Integer = StringVar() self.MaxLeafNodes_Integer.set('0') self.MaxLeafNodes_Integer_Button = ttk.Radiobutton(frame,", "max features. self.MaxFeatures_Label = ttk.Label(frame, text='Max Features:') self.MaxFeatures = StringVar() self.MaxFeatures.set('none') self.MaxFeatures_Integer =", "self.Estimators_Box.grid(column=1, row=2, sticky=(W)) self.MaxDepth_Label.grid(column=0, row=3, sticky=(W)) self.MaxDepth_Box.grid(column=1, row=3, sticky=(W)) self.MinSamplesSplit_Label.grid(column=0, columnspan=2, row=4, sticky=(W))", "value='log2') #Options for the max # of leaf nodes self.MaxLeafNodes_Label = ttk.Label(frame, text='Max", "# of leaf nodes self.MaxLeafNodes_Label = ttk.Label(frame, text='Max Leaf Nodes:') self.MaxLeafNodes = StringVar()", "textvariable=self.Verbose, width=7) def Display_Options(self): #Display options for the Decision Tree Classifier. self.clear_frame(self.frame) #Insert", "value='auto') self.MaxFeatures_Log2 = ttk.Radiobutton(frame, text='Log2', variable=self.MaxFeatures, value='log2') #Options for the max # of", "def __init__(self, frame): self.frame = frame self.name = \"Gradient Boosting Classifier\" #Options for", "StringVar() self.MaxLeafNodes_Integer.set('0') self.MaxLeafNodes_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxLeafNodes, value='integer') self.MaxLeafNodes_Integer_Box = ttk.Entry(frame, textvariable=self.MaxLeafNodes_Integer, width=7)", "for the number of boosting stages. 
self.Estimators_Label = ttk.Label(frame, text='# of Stages:') self.Estimators", "ttk.Label(frame, text='Min # of Leaf Nodes:') self.MinSamplesLeaf = StringVar() self.MinSamplesLeaf.set('1') self.MinSamplesLeaf_Box = ttk.Entry(frame,", "self.Subsample.set('1.0') self.Subsample_Box = Spinbox(frame, from_=0.0, to=1.0, increment=0.01, textvariable=self.Subsample, width=5) #Options for max features.", "Nodes:') self.MaxLeafNodes = StringVar() self.MaxLeafNodes.set('none') self.MaxLeafNodes_None = ttk.Radiobutton(frame, text='None', variable=self.MaxLeafNodes, value='none') self.MaxLeafNodes_Integer =", "= ttk.Label(frame, text='# of Stages:') self.Estimators = StringVar() self.Estimators.set('100') self.Estimators_Box = ttk.Entry(frame, textvariable=self.Estimators,", "Size:') self.Subsample = StringVar() self.Subsample.set('1.0') self.Subsample_Box = Spinbox(frame, from_=0.0, to=1.0, increment=0.01, textvariable=self.Subsample, width=5)", "text='Deviance', variable=self.Loss, value='deviance') self.Loss_Exponential = ttk.Radiobutton(frame, text='Exponential', variable=self.Loss, value='exponential') #Options for the learning", "row=11, sticky=(W)) self.MaxFeatures_Auto.grid(column=0, row=12, sticky=(W)) self.MaxFeatures_Log2.grid(column=0, row=12, sticky=(W)) self.MaxLeafNodes_Label.grid(column=0, row=13, sticky=(W)) self.MaxLeafNodes_None.grid(column=0, row=14,", "text='Number:', variable=self.MaxLeafNodes, value='integer') self.MaxLeafNodes_Integer_Box = ttk.Entry(frame, textvariable=self.MaxLeafNodes_Integer, width=7) #Options for verbosity self.Verbose_Label =", "\"Gradient Boosting Classifier\" #Options for the loss criteria. self.Loss_Label = ttk.Label(frame, text=\"Loss Function:\")", "text='Batch Size:') self.Subsample = StringVar() self.Subsample.set('1.0') self.Subsample_Box = Spinbox(frame, from_=0.0, to=1.0, increment=0.01, textvariable=self.Subsample,", "variable=self.MaxLeafNodes, value='none') self.MaxLeafNodes_Integer = StringVar() self.MaxLeafNodes_Integer.set('0') self.MaxLeafNodes_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxLeafNodes, value='integer') self.MaxLeafNodes_Integer_Box", "sticky=(W)) self.MaxFeatures_None.grid(column=0, row=9, sticky=(W)) self.MaxFeatures_Integer_Button.grid(column=0, row=10, sticky=(W)) self.MaxFeatures_Integer_Box.grid(column=1, row=10, sticky=(W)) self.MaxFeatures_Float_Button.grid(column=0, row=11, sticky=(W))", "= ttk.Label(frame, text='Min % of Leaf Nodes:') self.MinFractionLeaf = StringVar() self.MinFractionLeaf.set('0.0') self.MinFractionLeaf_Box =", "batch size self.Subsample_Label = ttk.Label(frame, text='Batch Size:') self.Subsample = StringVar() self.Subsample.set('1.0') self.Subsample_Box =", "= ttk.Entry(frame, textvariable=self.MinSamplesLeaf, width=7) #Options for the minimum fraction of leaf nodes self.MinFractionLeaf_Label", "= ttk.Radiobutton(frame, text='Exponential', variable=self.Loss, value='exponential') #Options for the learning rate. self.LearningRate_Label = ttk.Label(frame,", "for the learning rate. self.LearningRate_Label = ttk.Label(frame, text=\"Learning Rate:\") self.LearningRate = StringVar() self.LearningRate.set('0.1')", "features. 
self.MaxFeatures_Label = ttk.Label(frame, text='Max Features:') self.MaxFeatures = StringVar() self.MaxFeatures.set('none') self.MaxFeatures_Integer = StringVar()", "sticky=(W)) self.MaxFeatures_Label.grid(column=0, row=8, sticky=(W)) self.MaxFeatures_None.grid(column=0, row=9, sticky=(W)) self.MaxFeatures_Integer_Button.grid(column=0, row=10, sticky=(W)) self.MaxFeatures_Integer_Box.grid(column=1, row=10, sticky=(W))", "row=10, sticky=(W)) self.MaxFeatures_Integer_Box.grid(column=1, row=10, sticky=(W)) self.MaxFeatures_Float_Button.grid(column=0, row=11, sticky=(W)) self.MaxFeatures_Float_Box.grid(column=1, row=11, sticky=(W)) self.MaxFeatures_Auto.grid(column=0, row=12,", "Classifier\" #Options for the loss criteria. self.Loss_Label = ttk.Label(frame, text=\"Loss Function:\") self.Loss =", "sticky=(W)) self.MaxFeatures_Float_Box.grid(column=1, row=11, sticky=(W)) self.MaxFeatures_Auto.grid(column=0, row=12, sticky=(W)) self.MaxFeatures_Log2.grid(column=0, row=12, sticky=(W)) self.MaxLeafNodes_Label.grid(column=0, row=13, sticky=(W))", "row=12, sticky=(W)) self.MaxLeafNodes_Label.grid(column=0, row=13, sticky=(W)) self.MaxLeafNodes_None.grid(column=0, row=14, sticky=(W)) self.MaxLeafNodes_Integer_Button.grid(column=0, row=15, sticky=(W)) self.MaxLeafNodes_Integer_Box.grid(column=1, row=15,", "loss criteria. self.Loss_Label = ttk.Label(frame, text=\"Loss Function:\") self.Loss = StringVar() self.Loss.set('deviance') self.Loss_Deviance =", "value='deviance') self.Loss_Exponential = ttk.Radiobutton(frame, text='Exponential', variable=self.Loss, value='exponential') #Options for the learning rate. self.LearningRate_Label", "= ttk.Radiobutton(frame, text='None', variable=self.MaxLeafNodes, value='none') self.MaxLeafNodes_Integer = StringVar() self.MaxLeafNodes_Integer.set('0') self.MaxLeafNodes_Integer_Button = ttk.Radiobutton(frame, text='Number:',", "= ttk.Label(frame, text='Max Features:') self.MaxFeatures = StringVar() self.MaxFeatures.set('none') self.MaxFeatures_Integer = StringVar() self.MaxFeatures_Float =", "= Spinbox(frame, from_=0.0, to=1.0, textvariable=self.MaxFeatures_Float, width=5, increment=0.01) self.MaxFeatures_Auto = ttk.Radiobutton(frame, text='Auto', variable=self.MaxFeatures, value='auto')", "options into the frame. self.Loss_Label.grid(column=0,row=0, sticky=(W)) self.Loss_Deviance.grid(column=1, row=0, sticky=(W)) self.Loss_Exponential.grid(column=2, row=0, sticky=(W)) self.LearningRate_Label.grid(column=0,", "value='exponential') #Options for the learning rate. self.LearningRate_Label = ttk.Label(frame, text=\"Learning Rate:\") self.LearningRate =", "#Display options for the Decision Tree Classifier. self.clear_frame(self.frame) #Insert the options into the", "sticky=(W)) self.MinSamplesLeaf_Box.grid(column=2, row=5, sticky=(W)) self.MinFractionLeaf_Label.grid(column=0, columnspan=2, row=6, sticky=(W)) self.MinFractionLeaf_Box.grid(column=2, row=6, sticky=(W)) self.Subsample_Label.grid(column=0, row=7,", "max # of leaf nodes self.MaxLeafNodes_Label = ttk.Label(frame, text='Max Leaf Nodes:') self.MaxLeafNodes =", "= ttk.Label(frame, text=\"Loss Function:\") self.Loss = StringVar() self.Loss.set('deviance') self.Loss_Deviance = ttk.Radiobutton(frame, text='Deviance', variable=self.Loss,", "is split. 
self.MinSamplesSplit_Label = ttk.Label(frame, text='Min Samples to Split:') self.MinSamplesSplit = StringVar() self.MinSamplesSplit.set('2')", "ttk.Radiobutton(frame, text='Auto', variable=self.MaxFeatures, value='auto') self.MaxFeatures_Log2 = ttk.Radiobutton(frame, text='Log2', variable=self.MaxFeatures, value='log2') #Options for the", "text='Max Features:') self.MaxFeatures = StringVar() self.MaxFeatures.set('none') self.MaxFeatures_Integer = StringVar() self.MaxFeatures_Float = StringVar() self.MaxFeatures_Float.set('0.1')", "self.MaxFeatures_Integer_Box.grid(column=1, row=10, sticky=(W)) self.MaxFeatures_Float_Button.grid(column=0, row=11, sticky=(W)) self.MaxFeatures_Float_Box.grid(column=1, row=11, sticky=(W)) self.MaxFeatures_Auto.grid(column=0, row=12, sticky=(W)) self.MaxFeatures_Log2.grid(column=0,", "sticky=(W)) self.MaxFeatures_Float_Button.grid(column=0, row=11, sticky=(W)) self.MaxFeatures_Float_Box.grid(column=1, row=11, sticky=(W)) self.MaxFeatures_Auto.grid(column=0, row=12, sticky=(W)) self.MaxFeatures_Log2.grid(column=0, row=12, sticky=(W))", "Spinbox(frame, textvariable=self.LearningRate, from_=0.0, to=1.0, increment=0.01, width=5) #Options for the number of boosting stages.", "ttk.Radiobutton(frame, text='Percentage:', variable=self.MaxFeatures, value='float') self.MaxFeatures_Float_Box = Spinbox(frame, from_=0.0, to=1.0, textvariable=self.MaxFeatures_Float, width=5, increment=0.01) self.MaxFeatures_Auto", "self.MaxFeatures.set('none') self.MaxFeatures_Integer = StringVar() self.MaxFeatures_Float = StringVar() self.MaxFeatures_Float.set('0.1') self.MaxFeatures_None = ttk.Radiobutton(frame, text='None', variable=self.MaxFeatures,", "Algorithm from tkinter import * from tkinter import ttk class Gradient_Boosting_Classifier(Algorithm): def __init__(self,", "ttk.Label(frame, text='Max Depth:') self.MaxDepth = StringVar() self.MaxDepth.set('0') self.MaxDepth_Box = ttk.Entry(frame, textvariable=self.MaxDepth, width=7) #Options", "of leaf nodes self.MaxLeafNodes_Label = ttk.Label(frame, text='Max Leaf Nodes:') self.MaxLeafNodes = StringVar() self.MaxLeafNodes.set('none')", "self.MaxFeatures_Float.set('0.1') self.MaxFeatures_None = ttk.Radiobutton(frame, text='None', variable=self.MaxFeatures, value='none') self.MaxFeatures_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxFeatures, value='integer')", "increment=0.01, width=5) #Options for the number of boosting stages. self.Estimators_Label = ttk.Label(frame, text='#", "number of leaf nodes self.MinSamplesLeaf_Label = ttk.Label(frame, text='Min # of Leaf Nodes:') self.MinSamplesLeaf", "= StringVar() self.Subsample.set('1.0') self.Subsample_Box = Spinbox(frame, from_=0.0, to=1.0, increment=0.01, textvariable=self.Subsample, width=5) #Options for", "columnspan=2, row=6, sticky=(W)) self.MinFractionLeaf_Box.grid(column=2, row=6, sticky=(W)) self.Subsample_Label.grid(column=0, row=7, sticky=(W)) self.Subsample_Box.grid(column=1, row=7, sticky=(W)) self.MaxFeatures_Label.grid(column=0,", "= StringVar() self.MinSamplesSplit.set('2') self.MinSamplesSplit_Box = ttk.Entry(frame, textvariable=self.MinSamplesSplit, width=7) #Options for the minimum number", "self.MinSamplesLeaf_Box = ttk.Entry(frame, textvariable=self.MinSamplesLeaf, width=7) #Options for the minimum fraction of leaf nodes", "value='integer') self.MaxLeafNodes_Integer_Box = ttk.Entry(frame, textvariable=self.MaxLeafNodes_Integer, width=7) #Options for verbosity self.Verbose_Label = ttk.Label(frame, text='Verbose", "internal node is split. 
self.MinSamplesSplit_Label = ttk.Label(frame, text='Min Samples to Split:') self.MinSamplesSplit =", "self.frame = frame self.name = \"Gradient Boosting Classifier\" #Options for the loss criteria.", "= StringVar() self.MaxLeafNodes_Integer.set('0') self.MaxLeafNodes_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxLeafNodes, value='integer') self.MaxLeafNodes_Integer_Box = ttk.Entry(frame, textvariable=self.MaxLeafNodes_Integer,", "Leaf Nodes:') self.MaxLeafNodes = StringVar() self.MaxLeafNodes.set('none') self.MaxLeafNodes_None = ttk.Radiobutton(frame, text='None', variable=self.MaxLeafNodes, value='none') self.MaxLeafNodes_Integer", "#Options for max features. self.MaxFeatures_Label = ttk.Label(frame, text='Max Features:') self.MaxFeatures = StringVar() self.MaxFeatures.set('none')", "value='float') self.MaxFeatures_Float_Box = Spinbox(frame, from_=0.0, to=1.0, textvariable=self.MaxFeatures_Float, width=5, increment=0.01) self.MaxFeatures_Auto = ttk.Radiobutton(frame, text='Auto',", "Spinbox(frame, from_=0.0, to=1.0, textvariable=self.MaxFeatures_Float, width=5, increment=0.01) self.MaxFeatures_Auto = ttk.Radiobutton(frame, text='Auto', variable=self.MaxFeatures, value='auto') self.MaxFeatures_Log2", "frame. self.Loss_Label.grid(column=0,row=0, sticky=(W)) self.Loss_Deviance.grid(column=1, row=0, sticky=(W)) self.Loss_Exponential.grid(column=2, row=0, sticky=(W)) self.LearningRate_Label.grid(column=0, row=1, sticky=(W)) self.LearningRate_Box.grid(column=1,", "Split:') self.MinSamplesSplit = StringVar() self.MinSamplesSplit.set('2') self.MinSamplesSplit_Box = ttk.Entry(frame, textvariable=self.MinSamplesSplit, width=7) #Options for the", "self.Subsample_Box = Spinbox(frame, from_=0.0, to=1.0, increment=0.01, textvariable=self.Subsample, width=5) #Options for max features. self.MaxFeatures_Label", "#Options for the learning rate. self.LearningRate_Label = ttk.Label(frame, text=\"Learning Rate:\") self.LearningRate = StringVar()", "nodes self.MaxLeafNodes_Label = ttk.Label(frame, text='Max Leaf Nodes:') self.MaxLeafNodes = StringVar() self.MaxLeafNodes.set('none') self.MaxLeafNodes_None =", "self.MaxLeafNodes.set('none') self.MaxLeafNodes_None = ttk.Radiobutton(frame, text='None', variable=self.MaxLeafNodes, value='none') self.MaxLeafNodes_Integer = StringVar() self.MaxLeafNodes_Integer.set('0') self.MaxLeafNodes_Integer_Button =", "= ttk.Label(frame, text='Min Samples to Split:') self.MinSamplesSplit = StringVar() self.MinSamplesSplit.set('2') self.MinSamplesSplit_Box = ttk.Entry(frame,", "self.LearningRate_Label = ttk.Label(frame, text=\"Learning Rate:\") self.LearningRate = StringVar() self.LearningRate.set('0.1') self.LearningRate_Box = Spinbox(frame, textvariable=self.LearningRate,", "self.MaxFeatures = StringVar() self.MaxFeatures.set('none') self.MaxFeatures_Integer = StringVar() self.MaxFeatures_Float = StringVar() self.MaxFeatures_Float.set('0.1') self.MaxFeatures_None =", "ttk.Entry(frame, textvariable=self.Verbose, width=7) def Display_Options(self): #Display options for the Decision Tree Classifier. 
self.clear_frame(self.frame)", "Stages:') self.Estimators = StringVar() self.Estimators.set('100') self.Estimators_Box = ttk.Entry(frame, textvariable=self.Estimators, width=7) #Options for the", "Level:') self.Verbose = StringVar() self.Verbose.set('0') self.Verbose_Box = ttk.Entry(frame, textvariable=self.Verbose, width=7) def Display_Options(self): #Display", "text='Auto', variable=self.MaxFeatures, value='auto') self.MaxFeatures_Log2 = ttk.Radiobutton(frame, text='Log2', variable=self.MaxFeatures, value='log2') #Options for the max", "Decision Tree Classifier. self.clear_frame(self.frame) #Insert the options into the frame. self.Loss_Label.grid(column=0,row=0, sticky=(W)) self.Loss_Deviance.grid(column=1,", "class Gradient_Boosting_Classifier(Algorithm): def __init__(self, frame): self.frame = frame self.name = \"Gradient Boosting Classifier\"", "for the max depth self.MaxDepth_Label = ttk.Label(frame, text='Max Depth:') self.MaxDepth = StringVar() self.MaxDepth.set('0')", "row=0, sticky=(W)) self.Loss_Exponential.grid(column=2, row=0, sticky=(W)) self.LearningRate_Label.grid(column=0, row=1, sticky=(W)) self.LearningRate_Box.grid(column=1, row=1, sticky=(W)) self.Estimators_Label.grid(column=0, row=2,", "StringVar() self.Subsample.set('1.0') self.Subsample_Box = Spinbox(frame, from_=0.0, to=1.0, increment=0.01, textvariable=self.Subsample, width=5) #Options for max", "value='none') self.MaxFeatures_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxFeatures, value='integer') self.MaxFeatures_Integer_Box = ttk.Entry(frame, textvariable=self.MaxFeatures_Integer, width=7) self.MaxFeatures_Float_Button", "StringVar() self.MaxFeatures_Float.set('0.1') self.MaxFeatures_None = ttk.Radiobutton(frame, text='None', variable=self.MaxFeatures, value='none') self.MaxFeatures_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxFeatures,", "Nodes:') self.MinSamplesLeaf = StringVar() self.MinSamplesLeaf.set('1') self.MinSamplesLeaf_Box = ttk.Entry(frame, textvariable=self.MinSamplesLeaf, width=7) #Options for the", "self.MaxFeatures_Float_Button.grid(column=0, row=11, sticky=(W)) self.MaxFeatures_Float_Box.grid(column=1, row=11, sticky=(W)) self.MaxFeatures_Auto.grid(column=0, row=12, sticky=(W)) self.MaxFeatures_Log2.grid(column=0, row=12, sticky=(W)) self.MaxLeafNodes_Label.grid(column=0,", "self.MinSamplesSplit = StringVar() self.MinSamplesSplit.set('2') self.MinSamplesSplit_Box = ttk.Entry(frame, textvariable=self.MinSamplesSplit, width=7) #Options for the minimum", "sticky=(W)) self.MinSamplesLeaf_Label.grid(column=0, columnspan=2, row=5, sticky=(W)) self.MinSamplesLeaf_Box.grid(column=2, row=5, sticky=(W)) self.MinFractionLeaf_Label.grid(column=0, columnspan=2, row=6, sticky=(W)) self.MinFractionLeaf_Box.grid(column=2,", "textvariable=self.Estimators, width=7) #Options for the max depth self.MaxDepth_Label = ttk.Label(frame, text='Max Depth:') self.MaxDepth", "#Options for the max # of leaf nodes self.MaxLeafNodes_Label = ttk.Label(frame, text='Max Leaf", "#Options for the max depth self.MaxDepth_Label = ttk.Label(frame, text='Max Depth:') self.MaxDepth = StringVar()", "StringVar() self.Verbose.set('0') self.Verbose_Box = ttk.Entry(frame, textvariable=self.Verbose, width=7) def Display_Options(self): #Display options for the", "self.MinSamplesSplit_Box = ttk.Entry(frame, textvariable=self.MinSamplesSplit, width=7) #Options for the minimum number of leaf nodes", "self.LearningRate_Label.grid(column=0, row=1, sticky=(W)) 
self.LearningRate_Box.grid(column=1, row=1, sticky=(W)) self.Estimators_Label.grid(column=0, row=2, sticky=(W)) self.Estimators_Box.grid(column=1, row=2, sticky=(W)) self.MaxDepth_Label.grid(column=0,", "into the frame. self.Loss_Label.grid(column=0,row=0, sticky=(W)) self.Loss_Deviance.grid(column=1, row=0, sticky=(W)) self.Loss_Exponential.grid(column=2, row=0, sticky=(W)) self.LearningRate_Label.grid(column=0, row=1,", "row=7, sticky=(W)) self.Subsample_Box.grid(column=1, row=7, sticky=(W)) self.MaxFeatures_Label.grid(column=0, row=8, sticky=(W)) self.MaxFeatures_None.grid(column=0, row=9, sticky=(W)) self.MaxFeatures_Integer_Button.grid(column=0, row=10,", "to=1.0, textvariable=self.MaxFeatures_Float, width=5, increment=0.01) self.MaxFeatures_Auto = ttk.Radiobutton(frame, text='Auto', variable=self.MaxFeatures, value='auto') self.MaxFeatures_Log2 = ttk.Radiobutton(frame,", "self.MaxLeafNodes_Integer = StringVar() self.MaxLeafNodes_Integer.set('0') self.MaxLeafNodes_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxLeafNodes, value='integer') self.MaxLeafNodes_Integer_Box = ttk.Entry(frame,", "columnspan=2, row=4, sticky=(W)) self.MinSamplesSplit_Box.grid(column=2, row=4, sticky=(W)) self.MinSamplesLeaf_Label.grid(column=0, columnspan=2, row=5, sticky=(W)) self.MinSamplesLeaf_Box.grid(column=2, row=5, sticky=(W))", "rate. self.LearningRate_Label = ttk.Label(frame, text=\"Learning Rate:\") self.LearningRate = StringVar() self.LearningRate.set('0.1') self.LearningRate_Box = Spinbox(frame,", "row=6, sticky=(W)) self.MinFractionLeaf_Box.grid(column=2, row=6, sticky=(W)) self.Subsample_Label.grid(column=0, row=7, sticky=(W)) self.Subsample_Box.grid(column=1, row=7, sticky=(W)) self.MaxFeatures_Label.grid(column=0, row=8,", "= ttk.Label(frame, text='Max Depth:') self.MaxDepth = StringVar() self.MaxDepth.set('0') self.MaxDepth_Box = ttk.Entry(frame, textvariable=self.MaxDepth, width=7)", "width=7) #Options for the minimum fraction of leaf nodes self.MinFractionLeaf_Label = ttk.Label(frame, text='Min", "self.MaxDepth = StringVar() self.MaxDepth.set('0') self.MaxDepth_Box = ttk.Entry(frame, textvariable=self.MaxDepth, width=7) #Options for the minimum", "number of boosting stages. 
self.Estimators_Label = ttk.Label(frame, text='# of Stages:') self.Estimators = StringVar()", "= StringVar() self.Estimators.set('100') self.Estimators_Box = ttk.Entry(frame, textvariable=self.Estimators, width=7) #Options for the max depth", "leaf nodes self.MinSamplesLeaf_Label = ttk.Label(frame, text='Min # of Leaf Nodes:') self.MinSamplesLeaf = StringVar()", "self.MaxFeatures_Float_Box.grid(column=1, row=11, sticky=(W)) self.MaxFeatures_Auto.grid(column=0, row=12, sticky=(W)) self.MaxFeatures_Log2.grid(column=0, row=12, sticky=(W)) self.MaxLeafNodes_Label.grid(column=0, row=13, sticky=(W)) self.MaxLeafNodes_None.grid(column=0,", "of Leaf Nodes:') self.MinFractionLeaf = StringVar() self.MinFractionLeaf.set('0.0') self.MinFractionLeaf_Box = ttk.Entry(frame, textvariable=self.MinFractionLeaf, width=7) #Options", "sticky=(W)) self.MaxLeafNodes_None.grid(column=0, row=14, sticky=(W)) self.MaxLeafNodes_Integer_Button.grid(column=0, row=15, sticky=(W)) self.MaxLeafNodes_Integer_Box.grid(column=1, row=15, sticky=(W)) self.Verbose_Label.grid(column=0, row=16, sticky=(W))", "self.MaxDepth_Box.grid(column=1, row=3, sticky=(W)) self.MinSamplesSplit_Label.grid(column=0, columnspan=2, row=4, sticky=(W)) self.MinSamplesSplit_Box.grid(column=2, row=4, sticky=(W)) self.MinSamplesLeaf_Label.grid(column=0, columnspan=2, row=5,", "ttk.Radiobutton(frame, text='Exponential', variable=self.Loss, value='exponential') #Options for the learning rate. self.LearningRate_Label = ttk.Label(frame, text=\"Learning", "= Spinbox(frame, from_=0.0, to=1.0, increment=0.01, textvariable=self.Subsample, width=5) #Options for max features. self.MaxFeatures_Label =", "# of Leaf Nodes:') self.MinSamplesLeaf = StringVar() self.MinSamplesLeaf.set('1') self.MinSamplesLeaf_Box = ttk.Entry(frame, textvariable=self.MinSamplesLeaf, width=7)", "text='Max Leaf Nodes:') self.MaxLeafNodes = StringVar() self.MaxLeafNodes.set('none') self.MaxLeafNodes_None = ttk.Radiobutton(frame, text='None', variable=self.MaxLeafNodes, value='none')", "row=9, sticky=(W)) self.MaxFeatures_Integer_Button.grid(column=0, row=10, sticky=(W)) self.MaxFeatures_Integer_Box.grid(column=1, row=10, sticky=(W)) self.MaxFeatures_Float_Button.grid(column=0, row=11, sticky=(W)) self.MaxFeatures_Float_Box.grid(column=1, row=11,", "criteria. 
self.Loss_Label = ttk.Label(frame, text=\"Loss Function:\") self.Loss = StringVar() self.Loss.set('deviance') self.Loss_Deviance = ttk.Radiobutton(frame,", "import * from tkinter import ttk class Gradient_Boosting_Classifier(Algorithm): def __init__(self, frame): self.frame =", "self.MaxFeatures_Float_Button = ttk.Radiobutton(frame, text='Percentage:', variable=self.MaxFeatures, value='float') self.MaxFeatures_Float_Box = Spinbox(frame, from_=0.0, to=1.0, textvariable=self.MaxFeatures_Float, width=5,", "of leaf nodes self.MinSamplesLeaf_Label = ttk.Label(frame, text='Min # of Leaf Nodes:') self.MinSamplesLeaf =", "self.Loss_Label.grid(column=0,row=0, sticky=(W)) self.Loss_Deviance.grid(column=1, row=0, sticky=(W)) self.Loss_Exponential.grid(column=2, row=0, sticky=(W)) self.LearningRate_Label.grid(column=0, row=1, sticky=(W)) self.LearningRate_Box.grid(column=1, row=1,", "width=7) #Options for batch size self.Subsample_Label = ttk.Label(frame, text='Batch Size:') self.Subsample = StringVar()", "textvariable=self.MaxFeatures_Integer, width=7) self.MaxFeatures_Float_Button = ttk.Radiobutton(frame, text='Percentage:', variable=self.MaxFeatures, value='float') self.MaxFeatures_Float_Box = Spinbox(frame, from_=0.0, to=1.0,", "= ttk.Radiobutton(frame, text='Percentage:', variable=self.MaxFeatures, value='float') self.MaxFeatures_Float_Box = Spinbox(frame, from_=0.0, to=1.0, textvariable=self.MaxFeatures_Float, width=5, increment=0.01)", "ttk.Entry(frame, textvariable=self.MaxLeafNodes_Integer, width=7) #Options for verbosity self.Verbose_Label = ttk.Label(frame, text='Verbose Level:') self.Verbose =", "Spinbox(frame, from_=0.0, to=1.0, increment=0.01, textvariable=self.Subsample, width=5) #Options for max features. self.MaxFeatures_Label = ttk.Label(frame,", "sticky=(W)) self.MinFractionLeaf_Box.grid(column=2, row=6, sticky=(W)) self.Subsample_Label.grid(column=0, row=7, sticky=(W)) self.Subsample_Box.grid(column=1, row=7, sticky=(W)) self.MaxFeatures_Label.grid(column=0, row=8, sticky=(W))", "nodes self.MinSamplesLeaf_Label = ttk.Label(frame, text='Min # of Leaf Nodes:') self.MinSamplesLeaf = StringVar() self.MinSamplesLeaf.set('1')", "an internal node is split. 
self.MinSamplesSplit_Label = ttk.Label(frame, text='Min Samples to Split:') self.MinSamplesSplit", "ttk.Radiobutton(frame, text='Deviance', variable=self.Loss, value='deviance') self.Loss_Exponential = ttk.Radiobutton(frame, text='Exponential', variable=self.Loss, value='exponential') #Options for the", "ttk.Entry(frame, textvariable=self.MinSamplesSplit, width=7) #Options for the minimum number of leaf nodes self.MinSamplesLeaf_Label =", "self.MaxLeafNodes_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxLeafNodes, value='integer') self.MaxLeafNodes_Integer_Box = ttk.Entry(frame, textvariable=self.MaxLeafNodes_Integer, width=7) #Options for", "#Options for the minimum fraction of leaf nodes self.MinFractionLeaf_Label = ttk.Label(frame, text='Min %", "row=5, sticky=(W)) self.MinSamplesLeaf_Box.grid(column=2, row=5, sticky=(W)) self.MinFractionLeaf_Label.grid(column=0, columnspan=2, row=6, sticky=(W)) self.MinFractionLeaf_Box.grid(column=2, row=6, sticky=(W)) self.Subsample_Label.grid(column=0,", "ttk.Radiobutton(frame, text='None', variable=self.MaxLeafNodes, value='none') self.MaxLeafNodes_Integer = StringVar() self.MaxLeafNodes_Integer.set('0') self.MaxLeafNodes_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxLeafNodes,", "frame): self.frame = frame self.name = \"Gradient Boosting Classifier\" #Options for the loss", "= ttk.Label(frame, text='Batch Size:') self.Subsample = StringVar() self.Subsample.set('1.0') self.Subsample_Box = Spinbox(frame, from_=0.0, to=1.0,", "columnspan=2, row=5, sticky=(W)) self.MinSamplesLeaf_Box.grid(column=2, row=5, sticky=(W)) self.MinFractionLeaf_Label.grid(column=0, columnspan=2, row=6, sticky=(W)) self.MinFractionLeaf_Box.grid(column=2, row=6, sticky=(W))", "self.Subsample_Box.grid(column=1, row=7, sticky=(W)) self.MaxFeatures_Label.grid(column=0, row=8, sticky=(W)) self.MaxFeatures_None.grid(column=0, row=9, sticky=(W)) self.MaxFeatures_Integer_Button.grid(column=0, row=10, sticky=(W)) self.MaxFeatures_Integer_Box.grid(column=1,", "sticky=(W)) self.LearningRate_Label.grid(column=0, row=1, sticky=(W)) self.LearningRate_Box.grid(column=1, row=1, sticky=(W)) self.Estimators_Label.grid(column=0, row=2, sticky=(W)) self.Estimators_Box.grid(column=1, row=2, sticky=(W))", "the number of boosting stages. self.Estimators_Label = ttk.Label(frame, text='# of Stages:') self.Estimators =", "the minimum number of leaf nodes self.MinSamplesLeaf_Label = ttk.Label(frame, text='Min # of Leaf", "textvariable=self.MinSamplesLeaf, width=7) #Options for the minimum fraction of leaf nodes self.MinFractionLeaf_Label = ttk.Label(frame,", "self.Estimators_Box = ttk.Entry(frame, textvariable=self.Estimators, width=7) #Options for the max depth self.MaxDepth_Label = ttk.Label(frame,", "Tree Classifier. self.clear_frame(self.frame) #Insert the options into the frame. self.Loss_Label.grid(column=0,row=0, sticky=(W)) self.Loss_Deviance.grid(column=1, row=0,", "depth self.MaxDepth_Label = ttk.Label(frame, text='Max Depth:') self.MaxDepth = StringVar() self.MaxDepth.set('0') self.MaxDepth_Box = ttk.Entry(frame,", "textvariable=self.Subsample, width=5) #Options for max features. self.MaxFeatures_Label = ttk.Label(frame, text='Max Features:') self.MaxFeatures =", "the Decision Tree Classifier. self.clear_frame(self.frame) #Insert the options into the frame. 
self.Loss_Label.grid(column=0,row=0, sticky=(W))", "ttk.Label(frame, text=\"Learning Rate:\") self.LearningRate = StringVar() self.LearningRate.set('0.1') self.LearningRate_Box = Spinbox(frame, textvariable=self.LearningRate, from_=0.0, to=1.0,", "StringVar() self.Estimators.set('100') self.Estimators_Box = ttk.Entry(frame, textvariable=self.Estimators, width=7) #Options for the max depth self.MaxDepth_Label", "sticky=(W)) self.MaxLeafNodes_Label.grid(column=0, row=13, sticky=(W)) self.MaxLeafNodes_None.grid(column=0, row=14, sticky=(W)) self.MaxLeafNodes_Integer_Button.grid(column=0, row=15, sticky=(W)) self.MaxLeafNodes_Integer_Box.grid(column=1, row=15, sticky=(W))", "= ttk.Radiobutton(frame, text='None', variable=self.MaxFeatures, value='none') self.MaxFeatures_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxFeatures, value='integer') self.MaxFeatures_Integer_Box =", "the frame. self.Loss_Label.grid(column=0,row=0, sticky=(W)) self.Loss_Deviance.grid(column=1, row=0, sticky=(W)) self.Loss_Exponential.grid(column=2, row=0, sticky=(W)) self.LearningRate_Label.grid(column=0, row=1, sticky=(W))", "value='integer') self.MaxFeatures_Integer_Box = ttk.Entry(frame, textvariable=self.MaxFeatures_Integer, width=7) self.MaxFeatures_Float_Button = ttk.Radiobutton(frame, text='Percentage:', variable=self.MaxFeatures, value='float') self.MaxFeatures_Float_Box", "for verbosity self.Verbose_Label = ttk.Label(frame, text='Verbose Level:') self.Verbose = StringVar() self.Verbose.set('0') self.Verbose_Box =", "variable=self.MaxFeatures, value='none') self.MaxFeatures_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxFeatures, value='integer') self.MaxFeatures_Integer_Box = ttk.Entry(frame, textvariable=self.MaxFeatures_Integer, width=7)", "sticky=(W)) self.Subsample_Box.grid(column=1, row=7, sticky=(W)) self.MaxFeatures_Label.grid(column=0, row=8, sticky=(W)) self.MaxFeatures_None.grid(column=0, row=9, sticky=(W)) self.MaxFeatures_Integer_Button.grid(column=0, row=10, sticky=(W))", "width=7) #Options for the max depth self.MaxDepth_Label = ttk.Label(frame, text='Max Depth:') self.MaxDepth =", "ttk.Entry(frame, textvariable=self.MaxFeatures_Integer, width=7) self.MaxFeatures_Float_Button = ttk.Radiobutton(frame, text='Percentage:', variable=self.MaxFeatures, value='float') self.MaxFeatures_Float_Box = Spinbox(frame, from_=0.0,", "for max features. self.MaxFeatures_Label = ttk.Label(frame, text='Max Features:') self.MaxFeatures = StringVar() self.MaxFeatures.set('none') self.MaxFeatures_Integer", "self.Subsample_Label.grid(column=0, row=7, sticky=(W)) self.Subsample_Box.grid(column=1, row=7, sticky=(W)) self.MaxFeatures_Label.grid(column=0, row=8, sticky=(W)) self.MaxFeatures_None.grid(column=0, row=9, sticky=(W)) self.MaxFeatures_Integer_Button.grid(column=0,", "textvariable=self.MaxLeafNodes_Integer, width=7) #Options for verbosity self.Verbose_Label = ttk.Label(frame, text='Verbose Level:') self.Verbose = StringVar()", "the learning rate. 
self.LearningRate_Label = ttk.Label(frame, text=\"Learning Rate:\") self.LearningRate = StringVar() self.LearningRate.set('0.1') self.LearningRate_Box", "self.MaxFeatures_Float_Box = Spinbox(frame, from_=0.0, to=1.0, textvariable=self.MaxFeatures_Float, width=5, increment=0.01) self.MaxFeatures_Auto = ttk.Radiobutton(frame, text='Auto', variable=self.MaxFeatures,", "minimum number of leaf nodes self.MinSamplesLeaf_Label = ttk.Label(frame, text='Min # of Leaf Nodes:')", "row=5, sticky=(W)) self.MinFractionLeaf_Label.grid(column=0, columnspan=2, row=6, sticky=(W)) self.MinFractionLeaf_Box.grid(column=2, row=6, sticky=(W)) self.Subsample_Label.grid(column=0, row=7, sticky=(W)) self.Subsample_Box.grid(column=1,", "= ttk.Entry(frame, textvariable=self.MaxFeatures_Integer, width=7) self.MaxFeatures_Float_Button = ttk.Radiobutton(frame, text='Percentage:', variable=self.MaxFeatures, value='float') self.MaxFeatures_Float_Box = Spinbox(frame,", "of leaf nodes self.MinFractionLeaf_Label = ttk.Label(frame, text='Min % of Leaf Nodes:') self.MinFractionLeaf =", "text='Exponential', variable=self.Loss, value='exponential') #Options for the learning rate. self.LearningRate_Label = ttk.Label(frame, text=\"Learning Rate:\")", "self.MaxDepth_Label = ttk.Label(frame, text='Max Depth:') self.MaxDepth = StringVar() self.MaxDepth.set('0') self.MaxDepth_Box = ttk.Entry(frame, textvariable=self.MaxDepth,", "the minimum fraction of leaf nodes self.MinFractionLeaf_Label = ttk.Label(frame, text='Min % of Leaf", "= StringVar() self.MinFractionLeaf.set('0.0') self.MinFractionLeaf_Box = ttk.Entry(frame, textvariable=self.MinFractionLeaf, width=7) #Options for batch size self.Subsample_Label", "self.Loss_Exponential = ttk.Radiobutton(frame, text='Exponential', variable=self.Loss, value='exponential') #Options for the learning rate. self.LearningRate_Label =", "sticky=(W)) self.Estimators_Box.grid(column=1, row=2, sticky=(W)) self.MaxDepth_Label.grid(column=0, row=3, sticky=(W)) self.MaxDepth_Box.grid(column=1, row=3, sticky=(W)) self.MinSamplesSplit_Label.grid(column=0, columnspan=2, row=4,", "self.Estimators = StringVar() self.Estimators.set('100') self.Estimators_Box = ttk.Entry(frame, textvariable=self.Estimators, width=7) #Options for the max", "self.MinFractionLeaf_Box.grid(column=2, row=6, sticky=(W)) self.Subsample_Label.grid(column=0, row=7, sticky=(W)) self.Subsample_Box.grid(column=1, row=7, sticky=(W)) self.MaxFeatures_Label.grid(column=0, row=8, sticky=(W)) self.MaxFeatures_None.grid(column=0,", "Gradient_Boosting_Classifier(Algorithm): def __init__(self, frame): self.frame = frame self.name = \"Gradient Boosting Classifier\" #Options", "textvariable=self.MinFractionLeaf, width=7) #Options for batch size self.Subsample_Label = ttk.Label(frame, text='Batch Size:') self.Subsample =", "number of samples before an internal node is split. self.MinSamplesSplit_Label = ttk.Label(frame, text='Min", "the minimum number of samples before an internal node is split. self.MinSamplesSplit_Label =", "Boosting Classifier\" #Options for the loss criteria. self.Loss_Label = ttk.Label(frame, text=\"Loss Function:\") self.Loss", "the options into the frame. 
self.Loss_Label.grid(column=0,row=0, sticky=(W)) self.Loss_Deviance.grid(column=1, row=0, sticky=(W)) self.Loss_Exponential.grid(column=2, row=0, sticky=(W))", "from_=0.0, to=1.0, textvariable=self.MaxFeatures_Float, width=5, increment=0.01) self.MaxFeatures_Auto = ttk.Radiobutton(frame, text='Auto', variable=self.MaxFeatures, value='auto') self.MaxFeatures_Log2 =", "learning rate. self.LearningRate_Label = ttk.Label(frame, text=\"Learning Rate:\") self.LearningRate = StringVar() self.LearningRate.set('0.1') self.LearningRate_Box =", "text='Verbose Level:') self.Verbose = StringVar() self.Verbose.set('0') self.Verbose_Box = ttk.Entry(frame, textvariable=self.Verbose, width=7) def Display_Options(self):", "= ttk.Radiobutton(frame, text='Deviance', variable=self.Loss, value='deviance') self.Loss_Exponential = ttk.Radiobutton(frame, text='Exponential', variable=self.Loss, value='exponential') #Options for", "= ttk.Entry(frame, textvariable=self.MinFractionLeaf, width=7) #Options for batch size self.Subsample_Label = ttk.Label(frame, text='Batch Size:')", "of samples before an internal node is split. self.MinSamplesSplit_Label = ttk.Label(frame, text='Min Samples", "= ttk.Label(frame, text='Min # of Leaf Nodes:') self.MinSamplesLeaf = StringVar() self.MinSamplesLeaf.set('1') self.MinSamplesLeaf_Box =", "Leaf Nodes:') self.MinFractionLeaf = StringVar() self.MinFractionLeaf.set('0.0') self.MinFractionLeaf_Box = ttk.Entry(frame, textvariable=self.MinFractionLeaf, width=7) #Options for", "text='Max Depth:') self.MaxDepth = StringVar() self.MaxDepth.set('0') self.MaxDepth_Box = ttk.Entry(frame, textvariable=self.MaxDepth, width=7) #Options for", "self.MinSamplesLeaf_Label = ttk.Label(frame, text='Min # of Leaf Nodes:') self.MinSamplesLeaf = StringVar() self.MinSamplesLeaf.set('1') self.MinSamplesLeaf_Box", "nodes self.MinFractionLeaf_Label = ttk.Label(frame, text='Min % of Leaf Nodes:') self.MinFractionLeaf = StringVar() self.MinFractionLeaf.set('0.0')", "sticky=(W)) self.Loss_Exponential.grid(column=2, row=0, sticky=(W)) self.LearningRate_Label.grid(column=0, row=1, sticky=(W)) self.LearningRate_Box.grid(column=1, row=1, sticky=(W)) self.Estimators_Label.grid(column=0, row=2, sticky=(W))", "self.Verbose_Box = ttk.Entry(frame, textvariable=self.Verbose, width=7) def Display_Options(self): #Display options for the Decision Tree", "of Leaf Nodes:') self.MinSamplesLeaf = StringVar() self.MinSamplesLeaf.set('1') self.MinSamplesLeaf_Box = ttk.Entry(frame, textvariable=self.MinSamplesLeaf, width=7) #Options", "= Spinbox(frame, textvariable=self.LearningRate, from_=0.0, to=1.0, increment=0.01, width=5) #Options for the number of boosting", "self.Loss_Deviance = ttk.Radiobutton(frame, text='Deviance', variable=self.Loss, value='deviance') self.Loss_Exponential = ttk.Radiobutton(frame, text='Exponential', variable=self.Loss, value='exponential') #Options", "self.Subsample = StringVar() self.Subsample.set('1.0') self.Subsample_Box = Spinbox(frame, from_=0.0, to=1.0, increment=0.01, textvariable=self.Subsample, width=5) #Options", "self.MinSamplesSplit_Label = ttk.Label(frame, text='Min Samples to Split:') self.MinSamplesSplit = StringVar() self.MinSamplesSplit.set('2') self.MinSamplesSplit_Box =", "#Options for the loss criteria. 
self.Loss_Label = ttk.Label(frame, text=\"Loss Function:\") self.Loss = StringVar()", "self.MaxFeatures_Log2 = ttk.Radiobutton(frame, text='Log2', variable=self.MaxFeatures, value='log2') #Options for the max # of leaf", "to=1.0, increment=0.01, width=5) #Options for the number of boosting stages. self.Estimators_Label = ttk.Label(frame,", "#Options for verbosity self.Verbose_Label = ttk.Label(frame, text='Verbose Level:') self.Verbose = StringVar() self.Verbose.set('0') self.Verbose_Box", "self.MaxFeatures_Integer_Box = ttk.Entry(frame, textvariable=self.MaxFeatures_Integer, width=7) self.MaxFeatures_Float_Button = ttk.Radiobutton(frame, text='Percentage:', variable=self.MaxFeatures, value='float') self.MaxFeatures_Float_Box =", "sticky=(W)) self.Estimators_Label.grid(column=0, row=2, sticky=(W)) self.Estimators_Box.grid(column=1, row=2, sticky=(W)) self.MaxDepth_Label.grid(column=0, row=3, sticky=(W)) self.MaxDepth_Box.grid(column=1, row=3, sticky=(W))", "row=13, sticky=(W)) self.MaxLeafNodes_None.grid(column=0, row=14, sticky=(W)) self.MaxLeafNodes_Integer_Button.grid(column=0, row=15, sticky=(W)) self.MaxLeafNodes_Integer_Box.grid(column=1, row=15, sticky=(W)) self.Verbose_Label.grid(column=0, row=16,", "from_=0.0, to=1.0, increment=0.01, width=5) #Options for the number of boosting stages. self.Estimators_Label =", "self.MaxFeatures_Float = StringVar() self.MaxFeatures_Float.set('0.1') self.MaxFeatures_None = ttk.Radiobutton(frame, text='None', variable=self.MaxFeatures, value='none') self.MaxFeatures_Integer_Button = ttk.Radiobutton(frame,", "ttk.Label(frame, text='Min % of Leaf Nodes:') self.MinFractionLeaf = StringVar() self.MinFractionLeaf.set('0.0') self.MinFractionLeaf_Box = ttk.Entry(frame,", "samples before an internal node is split. self.MinSamplesSplit_Label = ttk.Label(frame, text='Min Samples to", "= ttk.Entry(frame, textvariable=self.MaxLeafNodes_Integer, width=7) #Options for verbosity self.Verbose_Label = ttk.Label(frame, text='Verbose Level:') self.Verbose", "sticky=(W)) self.Loss_Deviance.grid(column=1, row=0, sticky=(W)) self.Loss_Exponential.grid(column=2, row=0, sticky=(W)) self.LearningRate_Label.grid(column=0, row=1, sticky=(W)) self.LearningRate_Box.grid(column=1, row=1, sticky=(W))", "row=0, sticky=(W)) self.LearningRate_Label.grid(column=0, row=1, sticky=(W)) self.LearningRate_Box.grid(column=1, row=1, sticky=(W)) self.Estimators_Label.grid(column=0, row=2, sticky=(W)) self.Estimators_Box.grid(column=1, row=2,", "StringVar() self.MinSamplesSplit.set('2') self.MinSamplesSplit_Box = ttk.Entry(frame, textvariable=self.MinSamplesSplit, width=7) #Options for the minimum number of", "boosting stages. self.Estimators_Label = ttk.Label(frame, text='# of Stages:') self.Estimators = StringVar() self.Estimators.set('100') self.Estimators_Box", "before an internal node is split. self.MinSamplesSplit_Label = ttk.Label(frame, text='Min Samples to Split:')", "textvariable=self.MinSamplesSplit, width=7) #Options for the minimum number of leaf nodes self.MinSamplesLeaf_Label = ttk.Label(frame,", "the loss criteria. 
from algorithm import Algorithm
from tkinter import *
from tkinter import ttk

class Gradient_Boosting_Classifier(Algorithm):

    def __init__(self, frame):
        self.frame = frame
        self.name = "Gradient Boosting Classifier"
        #Options for the loss function.
        self.Loss_Label = ttk.Label(frame, text="Loss Function:")
        self.Loss = StringVar()
        self.Loss.set('deviance')
        self.Loss_Deviance = ttk.Radiobutton(frame, text='Deviance', variable=self.Loss, value='deviance')
        self.Loss_Exponential = ttk.Radiobutton(frame, text='Exponential', variable=self.Loss, value='exponential')
        #Options for the learning rate.
        self.LearningRate_Label = ttk.Label(frame, text="Learning Rate:")
        self.LearningRate = StringVar()
        self.LearningRate.set('0.1')
        self.LearningRate_Box = Spinbox(frame, textvariable=self.LearningRate, from_=0.0, to=1.0, increment=0.01, width=5)
        #Options for the number of boosting stages.
        self.Estimators_Label = ttk.Label(frame, text='# of Stages:')
        self.Estimators = StringVar()
        self.Estimators.set('100')
        self.Estimators_Box = ttk.Entry(frame, textvariable=self.Estimators, width=7)
        #Options for the max depth
        self.MaxDepth_Label = ttk.Label(frame, text='Max Depth:')
        self.MaxDepth = StringVar()
        self.MaxDepth.set('0')
        self.MaxDepth_Box = ttk.Entry(frame, textvariable=self.MaxDepth, width=7)
        #Options for the minimum number of samples before an internal node is split.
        self.MinSamplesSplit_Label = ttk.Label(frame, text='Min Samples to Split:')
        self.MinSamplesSplit = StringVar()
        self.MinSamplesSplit.set('2')
        self.MinSamplesSplit_Box = ttk.Entry(frame, textvariable=self.MinSamplesSplit, width=7)
        #Options for the minimum number of leaf nodes
        self.MinSamplesLeaf_Label = ttk.Label(frame, text='Min # of Leaf Nodes:')
        self.MinSamplesLeaf = StringVar()
        self.MinSamplesLeaf.set('1')
        self.MinSamplesLeaf_Box = ttk.Entry(frame, textvariable=self.MinSamplesLeaf, width=7)
        #Options for the minimum fraction of leaf nodes
        self.MinFractionLeaf_Label = ttk.Label(frame, text='Min % of Leaf Nodes:')
        self.MinFractionLeaf = StringVar()
        self.MinFractionLeaf.set('0.0')
        self.MinFractionLeaf_Box = ttk.Entry(frame, textvariable=self.MinFractionLeaf, width=7)
        #Options for batch size
        self.Subsample_Label = ttk.Label(frame, text='Batch Size:')
        self.Subsample = StringVar()
        self.Subsample.set('1.0')
        self.Subsample_Box = Spinbox(frame, from_=0.0, to=1.0, increment=0.01, textvariable=self.Subsample, width=5)
        #Options for max features.
        self.MaxFeatures_Label = ttk.Label(frame, text='Max Features:')
        self.MaxFeatures = StringVar()
        self.MaxFeatures.set('none')
        self.MaxFeatures_Integer = StringVar()
        self.MaxFeatures_Float = StringVar()
        self.MaxFeatures_Float.set('0.1')
        self.MaxFeatures_None = ttk.Radiobutton(frame, text='None', variable=self.MaxFeatures, value='none')
        self.MaxFeatures_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxFeatures, value='integer')
        self.MaxFeatures_Integer_Box = ttk.Entry(frame, textvariable=self.MaxFeatures_Integer, width=7)
        self.MaxFeatures_Float_Button = ttk.Radiobutton(frame, text='Percentage:', variable=self.MaxFeatures, value='float')
        self.MaxFeatures_Float_Box = Spinbox(frame, textvariable=self.MaxFeatures_Float, from_=0.0, to=1.0, increment=0.01)
        self.MaxFeatures_Auto = ttk.Radiobutton(frame, text='Auto', variable=self.MaxFeatures, value='auto')
        self.MaxFeatures_Log2 = ttk.Radiobutton(frame, text='Log2', variable=self.MaxFeatures, value='log2')
        #Options for the max leaf nodes
        self.MaxLeafNodes_Label = ttk.Label(frame, text='Max Leaf Nodes:')
        self.MaxLeafNodes = StringVar()
        self.MaxLeafNodes.set('none')
        self.MaxLeafNodes_None = ttk.Radiobutton(frame, text='None', variable=self.MaxLeafNodes, value='none')
        self.MaxLeafNodes_Integer = StringVar()
        self.MaxLeafNodes_Integer.set('0')
        self.MaxLeafNodes_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxLeafNodes, value='integer')
        self.MaxLeafNodes_Integer_Box = ttk.Entry(frame, textvariable=self.MaxLeafNodes_Integer, width=7)
        #Options for verbosity
        self.Verbose_Label = ttk.Label(frame, text='Verbose Level:')
        self.Verbose = StringVar()
        self.Verbose.set('0')
        self.Verbose_Box = ttk.Entry(frame, textvariable=self.Verbose, width=7)

    def Display_Options(self):
        #Display options for the Gradient Boosting Classifier.
        self.clear_frame(self.frame)
        #Insert the options into the frame.
        self.Loss_Label.grid(column=0, row=0, sticky=(W))
        self.Loss_Deviance.grid(column=1, row=0, sticky=(W))
        self.Loss_Exponential.grid(column=2, row=0, sticky=(W))
        self.LearningRate_Label.grid(column=0, row=1, sticky=(W))
        self.LearningRate_Box.grid(column=1, row=1, sticky=(W))
        self.Estimators_Label.grid(column=0, row=2, sticky=(W))
        self.Estimators_Box.grid(column=1, row=2, sticky=(W))
        self.MaxDepth_Label.grid(column=0, row=3, sticky=(W))
        self.MaxDepth_Box.grid(column=1, row=3, sticky=(W))
        self.MinSamplesSplit_Label.grid(column=0, columnspan=2, row=4, sticky=(W))
        self.MinSamplesSplit_Box.grid(column=2, row=4, sticky=(W))
        self.MinSamplesLeaf_Label.grid(column=0, columnspan=2, row=5, sticky=(W))
        self.MinSamplesLeaf_Box.grid(column=2, row=5, sticky=(W))
        self.MinFractionLeaf_Label.grid(column=0, columnspan=2, row=6, sticky=(W))
        self.MinFractionLeaf_Box.grid(column=2, row=6, sticky=(W))
        self.Subsample_Label.grid(column=0, row=7, sticky=(W))
        self.Subsample_Box.grid(column=1, row=7, sticky=(W))
        self.MaxFeatures_Label.grid(column=0, row=8, sticky=(W))
        self.MaxFeatures_None.grid(column=0, row=9, sticky=(W))
        self.MaxFeatures_Integer_Button.grid(column=0, row=10, sticky=(W))
        self.MaxFeatures_Integer_Box.grid(column=1, row=10, sticky=(W))
        self.MaxFeatures_Float_Button.grid(column=0, row=11, sticky=(W))
        self.MaxFeatures_Float_Box.grid(column=1, row=11, sticky=(W))
        self.MaxFeatures_Auto.grid(column=0, row=12, sticky=(W))
        self.MaxFeatures_Log2.grid(column=1, row=12, sticky=(W))  #column 1, so it does not overlap the 'Auto' button
        self.MaxLeafNodes_Label.grid(column=0, row=13, sticky=(W))
        self.MaxLeafNodes_None.grid(column=0, row=14, sticky=(W))
        self.MaxLeafNodes_Integer_Button.grid(column=0, row=15, sticky=(W))
        self.MaxLeafNodes_Integer_Box.grid(column=1, row=15, sticky=(W))
        self.Verbose_Label.grid(column=0, row=16, sticky=(W))
        self.Verbose_Box.grid(column=1, row=16, sticky=(W))
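# The class above only collects widget values; the sketch below (not part of this
# repo) shows one plausible way to translate them into scikit-learn's
# GradientBoostingClassifier. `build_classifier` and the handling of the 'none'/'0'
# sentinel defaults are assumptions; the sklearn parameter names are real.
from sklearn.ensemble import GradientBoostingClassifier

def build_classifier(gui):
    max_features = {
        'none': None,
        'auto': 'auto',      # accepted by the older sklearn versions contemporary with this GUI
        'log2': 'log2',
        'integer': int(gui.MaxFeatures_Integer.get() or 0),
        'float': float(gui.MaxFeatures_Float.get()),
    }[gui.MaxFeatures.get()]
    return GradientBoostingClassifier(
        loss=gui.Loss.get(),                          # 'deviance' or 'exponential'
        learning_rate=float(gui.LearningRate.get()),
        n_estimators=int(gui.Estimators.get()),
        max_depth=int(gui.MaxDepth.get()) or None,    # assume the '0' default means "no limit"
        min_samples_split=int(gui.MinSamplesSplit.get()),
        min_samples_leaf=int(gui.MinSamplesLeaf.get()),
        min_weight_fraction_leaf=float(gui.MinFractionLeaf.get()),
        subsample=float(gui.Subsample.get()),
        max_features=max_features,
        max_leaf_nodes=int(gui.MaxLeafNodes_Integer.get()) if gui.MaxLeafNodes.get() == 'integer' else None,
        verbose=int(gui.Verbose.get()),
    )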
[ "= size self.vocab_size = vocab_size self.output_dim = size self.zero_index = zero_index self._mask =", "if type(mask) == NeuralVariable else mask self._init = init self._load_values = load_values self.init(1)", "if self._load_values is not None: self.embed_matrix = theano.shared(self._load_values, name=\"embeddings\") else: self.embed_matrix = self.create_weight(self.vocab_size,", "x, mask=None): mask = mask if mask else self._mask if self.zero_index is not", "load_values=None, init=None): from deepy.core.neural_var import NeuralVariable super(WordEmbedding, self).__init__(\"word_embed\") self.size = size self.vocab_size =", "ret_tensor = self.embed_matrix[x.flatten()].reshape(list(x.shape) + [self.size]) if mask: if x.ndim == 2: ret_tensor *=", "self._mask if self.zero_index is not None: mask = T.neq(x, self.zero_index) # To avoid", "deepy.layers import NeuralLayer class WordEmbedding(NeuralLayer): \"\"\" Word embedding layer. The word embeddings are", "zero_index=None, mask=None, load_values=None, init=None): from deepy.core.neural_var import NeuralVariable super(WordEmbedding, self).__init__(\"word_embed\") self.size = size", "type(mask) == NeuralVariable else mask self._init = init self._load_values = load_values self.init(1) def", "class WordEmbedding(NeuralLayer): \"\"\" Word embedding layer. The word embeddings are randomly initialized, and", "self.zero_index = zero_index self._mask = mask.tensor if type(mask) == NeuralVariable else mask self._init", "= init self._load_values = load_values self.init(1) def prepare(self): if self._load_values is not None:", "= mask if mask else self._mask if self.zero_index is not None: mask =", "T.cast(x * mask, \"int32\") if x.ndim == 1: ret_tensor = self.embed_matrix[x] else: ret_tensor", "theano.tensor as T from deepy.layers import NeuralLayer class WordEmbedding(NeuralLayer): \"\"\" Word embedding layer.", "else self._mask if self.zero_index is not None: mask = T.neq(x, self.zero_index) # To", "from deepy.layers import NeuralLayer class WordEmbedding(NeuralLayer): \"\"\" Word embedding layer. The word embeddings", "__init__(self, size, vocab_size, zero_index=None, mask=None, load_values=None, init=None): from deepy.core.neural_var import NeuralVariable super(WordEmbedding, self).__init__(\"word_embed\")", "= zero_index self._mask = mask.tensor if type(mask) == NeuralVariable else mask self._init =", "= T.neq(x, self.zero_index) # To avoid negative index x = T.cast(x * mask,", "as T from deepy.layers import NeuralLayer class WordEmbedding(NeuralLayer): \"\"\" Word embedding layer. The", "mask.tensor if type(mask) == NeuralVariable else mask self._init = init self._load_values = load_values", "compute_tensor(self, x, mask=None): mask = mask if mask else self._mask if self.zero_index is", "mask if mask else self._mask if self.zero_index is not None: mask = T.neq(x,", "T.neq(x, self.zero_index) # To avoid negative index x = T.cast(x * mask, \"int32\")", "init=None): from deepy.core.neural_var import NeuralVariable super(WordEmbedding, self).__init__(\"word_embed\") self.size = size self.vocab_size = vocab_size", "None: mask = T.neq(x, self.zero_index) # To avoid negative index x = T.cast(x", "* mask, \"int32\") if x.ndim == 1: ret_tensor = self.embed_matrix[x] else: ret_tensor =", "\"\"\" Word embedding layer. 
The word embeddings are randomly initialized, and are learned", "= mask.tensor if type(mask) == NeuralVariable else mask self._init = init self._load_values =", "mask=None): mask = mask if mask else self._mask if self.zero_index is not None:", "self).__init__(\"word_embed\") self.size = size self.vocab_size = vocab_size self.output_dim = size self.zero_index = zero_index", "NeuralVariable else mask self._init = init self._load_values = load_values self.init(1) def prepare(self): if", "over the time. \"\"\" def __init__(self, size, vocab_size, zero_index=None, mask=None, load_values=None, init=None): from", "if mask else self._mask if self.zero_index is not None: mask = T.neq(x, self.zero_index)", "deepy.core.neural_var import NeuralVariable super(WordEmbedding, self).__init__(\"word_embed\") self.size = size self.vocab_size = vocab_size self.output_dim =", "self._mask = mask.tensor if type(mask) == NeuralVariable else mask self._init = init self._load_values", "learned over the time. \"\"\" def __init__(self, size, vocab_size, zero_index=None, mask=None, load_values=None, init=None):", "NeuralLayer class WordEmbedding(NeuralLayer): \"\"\" Word embedding layer. The word embeddings are randomly initialized,", "mask=None, load_values=None, init=None): from deepy.core.neural_var import NeuralVariable super(WordEmbedding, self).__init__(\"word_embed\") self.size = size self.vocab_size", "import NeuralVariable super(WordEmbedding, self).__init__(\"word_embed\") self.size = size self.vocab_size = vocab_size self.output_dim = size", "are learned over the time. \"\"\" def __init__(self, size, vocab_size, zero_index=None, mask=None, load_values=None,", "def prepare(self): if self._load_values is not None: self.embed_matrix = theano.shared(self._load_values, name=\"embeddings\") else: self.embed_matrix", "\"\"\" def __init__(self, size, vocab_size, zero_index=None, mask=None, load_values=None, init=None): from deepy.core.neural_var import NeuralVariable", "not None: mask = T.neq(x, self.zero_index) # To avoid negative index x =", "not None: self.embed_matrix = theano.shared(self._load_values, name=\"embeddings\") else: self.embed_matrix = self.create_weight(self.vocab_size, self.size, \"embeddings\", initializer=self._init)", "theano import theano.tensor as T from deepy.layers import NeuralLayer class WordEmbedding(NeuralLayer): \"\"\" Word", "# -*- coding: utf-8 -*- import theano import theano.tensor as T from deepy.layers", "import theano.tensor as T from deepy.layers import NeuralLayer class WordEmbedding(NeuralLayer): \"\"\" Word embedding", "= self.embed_matrix[x.flatten()].reshape(list(x.shape) + [self.size]) if mask: if x.ndim == 2: ret_tensor *= mask[:,", "randomly initialized, and are learned over the time. \"\"\" def __init__(self, size, vocab_size,", "name=\"embeddings\") else: self.embed_matrix = self.create_weight(self.vocab_size, self.size, \"embeddings\", initializer=self._init) self.register_parameters(self.embed_matrix) def compute_tensor(self, x, mask=None):", "self.register_parameters(self.embed_matrix) def compute_tensor(self, x, mask=None): mask = mask if mask else self._mask if", "import NeuralLayer class WordEmbedding(NeuralLayer): \"\"\" Word embedding layer. The word embeddings are randomly", "<gh_stars>100-1000 #!/usr/bin/env python # -*- coding: utf-8 -*- import theano import theano.tensor as", "WordEmbedding(NeuralLayer): \"\"\" Word embedding layer. 
The word embeddings are randomly initialized, and are", "self.size = size self.vocab_size = vocab_size self.output_dim = size self.zero_index = zero_index self._mask", "negative index x = T.cast(x * mask, \"int32\") if x.ndim == 1: ret_tensor", "== 1: ret_tensor = self.embed_matrix[x] else: ret_tensor = self.embed_matrix[x.flatten()].reshape(list(x.shape) + [self.size]) if mask:", "initializer=self._init) self.register_parameters(self.embed_matrix) def compute_tensor(self, x, mask=None): mask = mask if mask else self._mask", "self.zero_index is not None: mask = T.neq(x, self.zero_index) # To avoid negative index", "mask = T.neq(x, self.zero_index) # To avoid negative index x = T.cast(x *", "self._init = init self._load_values = load_values self.init(1) def prepare(self): if self._load_values is not", "mask: if x.ndim == 2: ret_tensor *= mask[:, :, None] elif x.ndim ==", "= T.cast(x * mask, \"int32\") if x.ndim == 1: ret_tensor = self.embed_matrix[x] else:", "self.embed_matrix[x] else: ret_tensor = self.embed_matrix[x.flatten()].reshape(list(x.shape) + [self.size]) if mask: if x.ndim == 2:", "import theano import theano.tensor as T from deepy.layers import NeuralLayer class WordEmbedding(NeuralLayer): \"\"\"", "if x.ndim == 2: ret_tensor *= mask[:, :, None] elif x.ndim == 1:", "= self.embed_matrix[x] else: ret_tensor = self.embed_matrix[x.flatten()].reshape(list(x.shape) + [self.size]) if mask: if x.ndim ==", "embeddings are randomly initialized, and are learned over the time. \"\"\" def __init__(self,", "from deepy.core.neural_var import NeuralVariable super(WordEmbedding, self).__init__(\"word_embed\") self.size = size self.vocab_size = vocab_size self.output_dim", "[self.size]) if mask: if x.ndim == 2: ret_tensor *= mask[:, :, None] elif", "init self._load_values = load_values self.init(1) def prepare(self): if self._load_values is not None: self.embed_matrix", "2: ret_tensor *= mask[:, :, None] elif x.ndim == 1: ret_tensor *= mask[:,", "def __init__(self, size, vocab_size, zero_index=None, mask=None, load_values=None, init=None): from deepy.core.neural_var import NeuralVariable super(WordEmbedding,", "= size self.zero_index = zero_index self._mask = mask.tensor if type(mask) == NeuralVariable else", "== NeuralVariable else mask self._init = init self._load_values = load_values self.init(1) def prepare(self):", "mask = mask if mask else self._mask if self.zero_index is not None: mask", "is not None: mask = T.neq(x, self.zero_index) # To avoid negative index x", "if x.ndim == 1: ret_tensor = self.embed_matrix[x] else: ret_tensor = self.embed_matrix[x.flatten()].reshape(list(x.shape) + [self.size])", "the time. 
\"\"\" def __init__(self, size, vocab_size, zero_index=None, mask=None, load_values=None, init=None): from deepy.core.neural_var", "+ [self.size]) if mask: if x.ndim == 2: ret_tensor *= mask[:, :, None]", "NeuralVariable super(WordEmbedding, self).__init__(\"word_embed\") self.size = size self.vocab_size = vocab_size self.output_dim = size self.zero_index", "1: ret_tensor = self.embed_matrix[x] else: ret_tensor = self.embed_matrix[x.flatten()].reshape(list(x.shape) + [self.size]) if mask: if", "== 2: ret_tensor *= mask[:, :, None] elif x.ndim == 1: ret_tensor *=", "\"embeddings\", initializer=self._init) self.register_parameters(self.embed_matrix) def compute_tensor(self, x, mask=None): mask = mask if mask else", "self.size, \"embeddings\", initializer=self._init) self.register_parameters(self.embed_matrix) def compute_tensor(self, x, mask=None): mask = mask if mask", "self.vocab_size = vocab_size self.output_dim = size self.zero_index = zero_index self._mask = mask.tensor if", "*= mask[:, :, None] elif x.ndim == 1: ret_tensor *= mask[:, None] return", "= load_values self.init(1) def prepare(self): if self._load_values is not None: self.embed_matrix = theano.shared(self._load_values,", "time. \"\"\" def __init__(self, size, vocab_size, zero_index=None, mask=None, load_values=None, init=None): from deepy.core.neural_var import", "self.embed_matrix[x.flatten()].reshape(list(x.shape) + [self.size]) if mask: if x.ndim == 2: ret_tensor *= mask[:, :,", "self._load_values = load_values self.init(1) def prepare(self): if self._load_values is not None: self.embed_matrix =", "load_values self.init(1) def prepare(self): if self._load_values is not None: self.embed_matrix = theano.shared(self._load_values, name=\"embeddings\")", "size self.vocab_size = vocab_size self.output_dim = size self.zero_index = zero_index self._mask = mask.tensor", "\"int32\") if x.ndim == 1: ret_tensor = self.embed_matrix[x] else: ret_tensor = self.embed_matrix[x.flatten()].reshape(list(x.shape) +", "self.init(1) def prepare(self): if self._load_values is not None: self.embed_matrix = theano.shared(self._load_values, name=\"embeddings\") else:", "#!/usr/bin/env python # -*- coding: utf-8 -*- import theano import theano.tensor as T", "are randomly initialized, and are learned over the time. \"\"\" def __init__(self, size,", "and are learned over the time. \"\"\" def __init__(self, size, vocab_size, zero_index=None, mask=None,", "None: self.embed_matrix = theano.shared(self._load_values, name=\"embeddings\") else: self.embed_matrix = self.create_weight(self.vocab_size, self.size, \"embeddings\", initializer=self._init) self.register_parameters(self.embed_matrix)", "= self.create_weight(self.vocab_size, self.size, \"embeddings\", initializer=self._init) self.register_parameters(self.embed_matrix) def compute_tensor(self, x, mask=None): mask = mask", "python # -*- coding: utf-8 -*- import theano import theano.tensor as T from", "-*- coding: utf-8 -*- import theano import theano.tensor as T from deepy.layers import", "mask else self._mask if self.zero_index is not None: mask = T.neq(x, self.zero_index) #", "self.zero_index) # To avoid negative index x = T.cast(x * mask, \"int32\") if", "x.ndim == 2: ret_tensor *= mask[:, :, None] elif x.ndim == 1: ret_tensor", "initialized, and are learned over the time. 
\"\"\" def __init__(self, size, vocab_size, zero_index=None,", "# To avoid negative index x = T.cast(x * mask, \"int32\") if x.ndim", "mask self._init = init self._load_values = load_values self.init(1) def prepare(self): if self._load_values is", "vocab_size self.output_dim = size self.zero_index = zero_index self._mask = mask.tensor if type(mask) ==", "self.embed_matrix = self.create_weight(self.vocab_size, self.size, \"embeddings\", initializer=self._init) self.register_parameters(self.embed_matrix) def compute_tensor(self, x, mask=None): mask =", "else: ret_tensor = self.embed_matrix[x.flatten()].reshape(list(x.shape) + [self.size]) if mask: if x.ndim == 2: ret_tensor", "else mask self._init = init self._load_values = load_values self.init(1) def prepare(self): if self._load_values", "The word embeddings are randomly initialized, and are learned over the time. \"\"\"", "zero_index self._mask = mask.tensor if type(mask) == NeuralVariable else mask self._init = init", "size self.zero_index = zero_index self._mask = mask.tensor if type(mask) == NeuralVariable else mask", "mask, \"int32\") if x.ndim == 1: ret_tensor = self.embed_matrix[x] else: ret_tensor = self.embed_matrix[x.flatten()].reshape(list(x.shape)", "coding: utf-8 -*- import theano import theano.tensor as T from deepy.layers import NeuralLayer", "-*- import theano import theano.tensor as T from deepy.layers import NeuralLayer class WordEmbedding(NeuralLayer):", "self.output_dim = size self.zero_index = zero_index self._mask = mask.tensor if type(mask) == NeuralVariable", "theano.shared(self._load_values, name=\"embeddings\") else: self.embed_matrix = self.create_weight(self.vocab_size, self.size, \"embeddings\", initializer=self._init) self.register_parameters(self.embed_matrix) def compute_tensor(self, x,", "To avoid negative index x = T.cast(x * mask, \"int32\") if x.ndim ==", "index x = T.cast(x * mask, \"int32\") if x.ndim == 1: ret_tensor =", "ret_tensor *= mask[:, :, None] elif x.ndim == 1: ret_tensor *= mask[:, None]", "utf-8 -*- import theano import theano.tensor as T from deepy.layers import NeuralLayer class", "vocab_size, zero_index=None, mask=None, load_values=None, init=None): from deepy.core.neural_var import NeuralVariable super(WordEmbedding, self).__init__(\"word_embed\") self.size =", "super(WordEmbedding, self).__init__(\"word_embed\") self.size = size self.vocab_size = vocab_size self.output_dim = size self.zero_index =", "self.create_weight(self.vocab_size, self.size, \"embeddings\", initializer=self._init) self.register_parameters(self.embed_matrix) def compute_tensor(self, x, mask=None): mask = mask if", "layer. The word embeddings are randomly initialized, and are learned over the time.", "word embeddings are randomly initialized, and are learned over the time. \"\"\" def", "x = T.cast(x * mask, \"int32\") if x.ndim == 1: ret_tensor = self.embed_matrix[x]", "embedding layer. The word embeddings are randomly initialized, and are learned over the", "else: self.embed_matrix = self.create_weight(self.vocab_size, self.size, \"embeddings\", initializer=self._init) self.register_parameters(self.embed_matrix) def compute_tensor(self, x, mask=None): mask", "if self.zero_index is not None: mask = T.neq(x, self.zero_index) # To avoid negative", "mask[:, :, None] elif x.ndim == 1: ret_tensor *= mask[:, None] return ret_tensor", "= vocab_size self.output_dim = size self.zero_index = zero_index self._mask = mask.tensor if type(mask)", "Word embedding layer. 
The word embeddings are randomly initialized, and are learned over", "self._load_values is not None: self.embed_matrix = theano.shared(self._load_values, name=\"embeddings\") else: self.embed_matrix = self.create_weight(self.vocab_size, self.size,", "x.ndim == 1: ret_tensor = self.embed_matrix[x] else: ret_tensor = self.embed_matrix[x.flatten()].reshape(list(x.shape) + [self.size]) if", "size, vocab_size, zero_index=None, mask=None, load_values=None, init=None): from deepy.core.neural_var import NeuralVariable super(WordEmbedding, self).__init__(\"word_embed\") self.size", "ret_tensor = self.embed_matrix[x] else: ret_tensor = self.embed_matrix[x.flatten()].reshape(list(x.shape) + [self.size]) if mask: if x.ndim", "prepare(self): if self._load_values is not None: self.embed_matrix = theano.shared(self._load_values, name=\"embeddings\") else: self.embed_matrix =", "T from deepy.layers import NeuralLayer class WordEmbedding(NeuralLayer): \"\"\" Word embedding layer. The word", "avoid negative index x = T.cast(x * mask, \"int32\") if x.ndim == 1:", "if mask: if x.ndim == 2: ret_tensor *= mask[:, :, None] elif x.ndim", "is not None: self.embed_matrix = theano.shared(self._load_values, name=\"embeddings\") else: self.embed_matrix = self.create_weight(self.vocab_size, self.size, \"embeddings\",", "= theano.shared(self._load_values, name=\"embeddings\") else: self.embed_matrix = self.create_weight(self.vocab_size, self.size, \"embeddings\", initializer=self._init) self.register_parameters(self.embed_matrix) def compute_tensor(self,", "def compute_tensor(self, x, mask=None): mask = mask if mask else self._mask if self.zero_index", "self.embed_matrix = theano.shared(self._load_values, name=\"embeddings\") else: self.embed_matrix = self.create_weight(self.vocab_size, self.size, \"embeddings\", initializer=self._init) self.register_parameters(self.embed_matrix) def" ]
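# A minimal usage sketch (assumed, not from the deepy source): embedding a batch of
# token-id sequences with id 0 treated as padding via zero_index. It assumes the
# self.init(1) call in the constructor has already run prepare() and created
# embed_matrix, as the class above suggests; theano/T come from the module imports.
import numpy as np

token_ids = T.imatrix("token_ids")                        # (batch, time) integer ids
embed = WordEmbedding(size=8, vocab_size=100, zero_index=0)
embedded = embed.compute_tensor(token_ids)                # (batch, time, 8); padded positions zeroed
fn = theano.function([token_ids], embedded)
print(fn(np.array([[1, 2, 0]], dtype="int32")).shape)     # (1, 3, 8)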
[ "import pandas as pd # Data Preprocessing dataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None)", "por lo que tiene sentido que sea la asociación más fuerte del dataset", "pruebas para obtener buenos valores para los parámetros # Visualising the results results", "última # El primer valor numérico es la confidence, en el primer caso", "parámetros # Visualising the results results = list(rules) myResults = [list(x) for x", "no perder la primera línea # Conversión del dataset a una lista de", "None para no perder la primera línea # Conversión del dataset a una", "hasta llegar a la última # El primer valor numérico es la confidence,", "en el primer caso 0.29, por lo que la gente que compre light", "en la lista que aparece para cada valor hasta llegar a la última", "valores para los parámetros # Visualising the results results = list(rules) myResults =", "de 4.84 es alto con respecto a nuestro límite inferior (3), por lo", "min_support # min_lift: Porque sí, hacer pruebas para obtener buenos valores para los", "= 2) # min_support: El support de un producto que es adquirido 3", "# La lista se encuentra ordenada de mayor a menor relevancia (lift) #", "que aparece para cada valor hasta llegar a la última # El primer", "veces al día: 3*7/7501 # min_confidence: Buena combinación para min_support # min_lift: Porque", "que están relacionados # Hacer doble click en la lista que aparece para", "a menor relevancia (lift) # En frozenset aparecen los productos que están relacionados", "pd # Data Preprocessing dataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None) # Se especifica", "# Apriori # Importing the libraries import numpy as np import matplotlib.pyplot as", "se encuentra ordenada de mayor a menor relevancia (lift) # En frozenset aparecen", "3, min_length = 2) # min_support: El support de un producto que es", "aparecen los productos que están relacionados # Hacer doble click en la lista", "un producto que es adquirido 3 veces al día: 3*7/7501 # min_confidence: Buena", "3*7/7501 # min_confidence: Buena combinación para min_support # min_lift: Porque sí, hacer pruebas", "min_confidence = 0.2, min_lift = 3, min_length = 2) # min_support: El support", "dataset a una lista de listas transactions = [] for i in range(0,", "min_lift: Porque sí, hacer pruebas para obtener buenos valores para los parámetros #", "alto con respecto a nuestro límite inferior (3), por lo que tiene sentido", "# Data Preprocessing dataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None) # Se especifica el", "del dataset a una lista de listas transactions = [] for i in", "list(rules) myResults = [list(x) for x in results] # La lista se encuentra", "cream tiene un 29 % de probabilidades de comprar pollo # El segundo", "for x in results] # La lista se encuentra ordenada de mayor a", "de un producto que es adquirido 3 veces al día: 3*7/7501 # min_confidence:", "= apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length =", "# min_confidence: Buena combinación para min_support # min_lift: Porque sí, hacer pruebas para", "0.2, min_lift = 3, min_length = 2) # min_support: El support de un", "% de probabilidades de comprar pollo # El segundo y último es el", "a la última # El primer valor numérico es la confidence, en el", "último es el lift. Un valor de 4.84 es alto con respecto a", "la lista que aparece para cada valor hasta llegar a la última #", "= list(rules) myResults = [list(x) for x in results] # La lista se", "segundo y último es el lift. 
Un valor de 4.84 es alto con", "de comprar pollo # El segundo y último es el lift. Un valor", "la primera línea # Conversión del dataset a una lista de listas transactions", "np import matplotlib.pyplot as plt import pandas as pd # Data Preprocessing dataset", "primer caso 0.29, por lo que la gente que compre light cream tiene", "es adquirido 3 veces al día: 3*7/7501 # min_confidence: Buena combinación para min_support", "3 veces al día: 3*7/7501 # min_confidence: Buena combinación para min_support # min_lift:", "in range(0, 7501): transactions.append([str(dataset.values[i,j]) for j in range(0, 20)]) # Training Apriori on", "para min_support # min_lift: Porque sí, hacer pruebas para obtener buenos valores para", "apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length = 2)", "matplotlib.pyplot as plt import pandas as pd # Data Preprocessing dataset = pd.read_csv('Market_Basket_Optimisation.csv',", "de probabilidades de comprar pollo # El segundo y último es el lift.", "el header = None para no perder la primera línea # Conversión del", "lista de listas transactions = [] for i in range(0, 7501): transactions.append([str(dataset.values[i,j]) for", "on the dataset from apyori import apriori rules = apriori(transactions, min_support = 0.003,", "que es adquirido 3 veces al día: 3*7/7501 # min_confidence: Buena combinación para", "compre light cream tiene un 29 % de probabilidades de comprar pollo #", "min_lift = 3, min_length = 2) # min_support: El support de un producto", "por lo que la gente que compre light cream tiene un 29 %", "al día: 3*7/7501 # min_confidence: Buena combinación para min_support # min_lift: Porque sí,", "x in results] # La lista se encuentra ordenada de mayor a menor", "i in range(0, 7501): transactions.append([str(dataset.values[i,j]) for j in range(0, 20)]) # Training Apriori", "La lista se encuentra ordenada de mayor a menor relevancia (lift) # En", "29 % de probabilidades de comprar pollo # El segundo y último es", "for i in range(0, 7501): transactions.append([str(dataset.values[i,j]) for j in range(0, 20)]) # Training", "transactions.append([str(dataset.values[i,j]) for j in range(0, 20)]) # Training Apriori on the dataset from", "results results = list(rules) myResults = [list(x) for x in results] # La", "dataset from apyori import apriori rules = apriori(transactions, min_support = 0.003, min_confidence =", "ordenada de mayor a menor relevancia (lift) # En frozenset aparecen los productos", "light cream tiene un 29 % de probabilidades de comprar pollo # El", "header = None para no perder la primera línea # Conversión del dataset", "apriori rules = apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3,", "límite inferior (3), por lo que tiene sentido que sea la asociación más", "los productos que están relacionados # Hacer doble click en la lista que", "0.003, min_confidence = 0.2, min_lift = 3, min_length = 2) # min_support: El", "para obtener buenos valores para los parámetros # Visualising the results results =", "# El segundo y último es el lift. 
Un valor de 4.84 es", "rules = apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length", "respecto a nuestro límite inferior (3), por lo que tiene sentido que sea", "myResults = [list(x) for x in results] # La lista se encuentra ordenada", "pd.read_csv('Market_Basket_Optimisation.csv', header = None) # Se especifica el header = None para no", "<reponame>xavialex/Machine-Learning-Templates # Apriori # Importing the libraries import numpy as np import matplotlib.pyplot", "from apyori import apriori rules = apriori(transactions, min_support = 0.003, min_confidence = 0.2,", "plt import pandas as pd # Data Preprocessing dataset = pd.read_csv('Market_Basket_Optimisation.csv', header =", "relacionados # Hacer doble click en la lista que aparece para cada valor", "El primer valor numérico es la confidence, en el primer caso 0.29, por", "a nuestro límite inferior (3), por lo que tiene sentido que sea la", "4.84 es alto con respecto a nuestro límite inferior (3), por lo que", "0.29, por lo que la gente que compre light cream tiene un 29", "in range(0, 20)]) # Training Apriori on the dataset from apyori import apriori", "support de un producto que es adquirido 3 veces al día: 3*7/7501 #", "combinación para min_support # min_lift: Porque sí, hacer pruebas para obtener buenos valores", "comprar pollo # El segundo y último es el lift. Un valor de", "click en la lista que aparece para cada valor hasta llegar a la", "y último es el lift. Un valor de 4.84 es alto con respecto", "En frozenset aparecen los productos que están relacionados # Hacer doble click en", "= [list(x) for x in results] # La lista se encuentra ordenada de", "import numpy as np import matplotlib.pyplot as plt import pandas as pd #", "as plt import pandas as pd # Data Preprocessing dataset = pd.read_csv('Market_Basket_Optimisation.csv', header", "Apriori on the dataset from apyori import apriori rules = apriori(transactions, min_support =", "# Importing the libraries import numpy as np import matplotlib.pyplot as plt import", "import apriori rules = apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift =", "un 29 % de probabilidades de comprar pollo # El segundo y último", "valor hasta llegar a la última # El primer valor numérico es la", "para no perder la primera línea # Conversión del dataset a una lista", "Buena combinación para min_support # min_lift: Porque sí, hacer pruebas para obtener buenos", "# Visualising the results results = list(rules) myResults = [list(x) for x in", "as pd # Data Preprocessing dataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None) # Se", "la última # El primer valor numérico es la confidence, en el primer", "caso 0.29, por lo que la gente que compre light cream tiene un", "Un valor de 4.84 es alto con respecto a nuestro límite inferior (3),", "# Hacer doble click en la lista que aparece para cada valor hasta", "lift. 
Un valor de 4.84 es alto con respecto a nuestro límite inferior", "línea # Conversión del dataset a una lista de listas transactions = []", "encuentra ordenada de mayor a menor relevancia (lift) # En frozenset aparecen los", "primer valor numérico es la confidence, en el primer caso 0.29, por lo", "producto que es adquirido 3 veces al día: 3*7/7501 # min_confidence: Buena combinación", "listas transactions = [] for i in range(0, 7501): transactions.append([str(dataset.values[i,j]) for j in", "Training Apriori on the dataset from apyori import apriori rules = apriori(transactions, min_support", "# Training Apriori on the dataset from apyori import apriori rules = apriori(transactions,", "min_support: El support de un producto que es adquirido 3 veces al día:", "el lift. Un valor de 4.84 es alto con respecto a nuestro límite", "# min_support: El support de un producto que es adquirido 3 veces al", "tiene un 29 % de probabilidades de comprar pollo # El segundo y", "j in range(0, 20)]) # Training Apriori on the dataset from apyori import", "llegar a la última # El primer valor numérico es la confidence, en", "Importing the libraries import numpy as np import matplotlib.pyplot as plt import pandas", "# Conversión del dataset a una lista de listas transactions = [] for", "= 0.003, min_confidence = 0.2, min_lift = 3, min_length = 2) # min_support:", "la confidence, en el primer caso 0.29, por lo que la gente que", "Apriori # Importing the libraries import numpy as np import matplotlib.pyplot as plt", "probabilidades de comprar pollo # El segundo y último es el lift. Un", "nuestro límite inferior (3), por lo que tiene sentido que sea la asociación", "results = list(rules) myResults = [list(x) for x in results] # La lista", "Porque sí, hacer pruebas para obtener buenos valores para los parámetros # Visualising", "menor relevancia (lift) # En frozenset aparecen los productos que están relacionados #", "(lift) # En frozenset aparecen los productos que están relacionados # Hacer doble", "frozenset aparecen los productos que están relacionados # Hacer doble click en la", "El support de un producto que es adquirido 3 veces al día: 3*7/7501", "import matplotlib.pyplot as plt import pandas as pd # Data Preprocessing dataset =", "adquirido 3 veces al día: 3*7/7501 # min_confidence: Buena combinación para min_support #", "lista que aparece para cada valor hasta llegar a la última # El", "están relacionados # Hacer doble click en la lista que aparece para cada", "the results results = list(rules) myResults = [list(x) for x in results] #", "min_confidence: Buena combinación para min_support # min_lift: Porque sí, hacer pruebas para obtener", "the dataset from apyori import apriori rules = apriori(transactions, min_support = 0.003, min_confidence", "range(0, 20)]) # Training Apriori on the dataset from apyori import apriori rules", "libraries import numpy as np import matplotlib.pyplot as plt import pandas as pd", "obtener buenos valores para los parámetros # Visualising the results results = list(rules)", "Se especifica el header = None para no perder la primera línea #", "especifica el header = None para no perder la primera línea # Conversión", "= 0.2, min_lift = 3, min_length = 2) # min_support: El support de", "a una lista de listas transactions = [] for i in range(0, 7501):", "numpy as np import matplotlib.pyplot as plt import pandas as pd # Data", "dataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None) # Se especifica el header = None", "for j in range(0, 20)]) # Training Apriori on the 
dataset from apyori", "productos que están relacionados # Hacer doble click en la lista que aparece", "lo que la gente que compre light cream tiene un 29 % de", "min_length = 2) # min_support: El support de un producto que es adquirido", "(3), por lo que tiene sentido que sea la asociación más fuerte del", "Data Preprocessing dataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None) # Se especifica el header", "results] # La lista se encuentra ordenada de mayor a menor relevancia (lift)", "20)]) # Training Apriori on the dataset from apyori import apriori rules =", "sí, hacer pruebas para obtener buenos valores para los parámetros # Visualising the", "de mayor a menor relevancia (lift) # En frozenset aparecen los productos que", "lista se encuentra ordenada de mayor a menor relevancia (lift) # En frozenset", "numérico es la confidence, en el primer caso 0.29, por lo que la", "la gente que compre light cream tiene un 29 % de probabilidades de", "# Se especifica el header = None para no perder la primera línea", "= None para no perder la primera línea # Conversión del dataset a", "# min_lift: Porque sí, hacer pruebas para obtener buenos valores para los parámetros", "cada valor hasta llegar a la última # El primer valor numérico es", "= [] for i in range(0, 7501): transactions.append([str(dataset.values[i,j]) for j in range(0, 20)])", "aparece para cada valor hasta llegar a la última # El primer valor", "as np import matplotlib.pyplot as plt import pandas as pd # Data Preprocessing", "doble click en la lista que aparece para cada valor hasta llegar a", "gente que compre light cream tiene un 29 % de probabilidades de comprar", "range(0, 7501): transactions.append([str(dataset.values[i,j]) for j in range(0, 20)]) # Training Apriori on the", "con respecto a nuestro límite inferior (3), por lo que tiene sentido que", "una lista de listas transactions = [] for i in range(0, 7501): transactions.append([str(dataset.values[i,j])", "relevancia (lift) # En frozenset aparecen los productos que están relacionados # Hacer", "header = None) # Se especifica el header = None para no perder", "pandas as pd # Data Preprocessing dataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None) #", "[] for i in range(0, 7501): transactions.append([str(dataset.values[i,j]) for j in range(0, 20)]) #", "es alto con respecto a nuestro límite inferior (3), por lo que tiene", "inferior (3), por lo que tiene sentido que sea la asociación más fuerte", "None) # Se especifica el header = None para no perder la primera", "Conversión del dataset a una lista de listas transactions = [] for i", "transactions = [] for i in range(0, 7501): transactions.append([str(dataset.values[i,j]) for j in range(0,", "Preprocessing dataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None) # Se especifica el header =", "= None) # Se especifica el header = None para no perder la", "= 3, min_length = 2) # min_support: El support de un producto que", "# En frozenset aparecen los productos que están relacionados # Hacer doble click", "Visualising the results results = list(rules) myResults = [list(x) for x in results]", "el primer caso 0.29, por lo que la gente que compre light cream", "mayor a menor relevancia (lift) # En frozenset aparecen los productos que están", "min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length = 2) #", "día: 3*7/7501 # min_confidence: Buena combinación para min_support # min_lift: Porque sí, hacer", "buenos valores para los parámetros # Visualising the results results = 
list(rules) myResults", "confidence, en el primer caso 0.29, por lo que la gente que compre", "de listas transactions = [] for i in range(0, 7501): transactions.append([str(dataset.values[i,j]) for j", "2) # min_support: El support de un producto que es adquirido 3 veces", "= pd.read_csv('Market_Basket_Optimisation.csv', header = None) # Se especifica el header = None para", "Hacer doble click en la lista que aparece para cada valor hasta llegar", "pollo # El segundo y último es el lift. Un valor de 4.84", "para los parámetros # Visualising the results results = list(rules) myResults = [list(x)", "7501): transactions.append([str(dataset.values[i,j]) for j in range(0, 20)]) # Training Apriori on the dataset", "que la gente que compre light cream tiene un 29 % de probabilidades", "los parámetros # Visualising the results results = list(rules) myResults = [list(x) for", "hacer pruebas para obtener buenos valores para los parámetros # Visualising the results", "in results] # La lista se encuentra ordenada de mayor a menor relevancia", "valor numérico es la confidence, en el primer caso 0.29, por lo que", "para cada valor hasta llegar a la última # El primer valor numérico", "es el lift. Un valor de 4.84 es alto con respecto a nuestro", "[list(x) for x in results] # La lista se encuentra ordenada de mayor", "primera línea # Conversión del dataset a una lista de listas transactions =", "El segundo y último es el lift. Un valor de 4.84 es alto", "the libraries import numpy as np import matplotlib.pyplot as plt import pandas as", "apyori import apriori rules = apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift", "que compre light cream tiene un 29 % de probabilidades de comprar pollo", "valor de 4.84 es alto con respecto a nuestro límite inferior (3), por", "# El primer valor numérico es la confidence, en el primer caso 0.29,", "perder la primera línea # Conversión del dataset a una lista de listas", "es la confidence, en el primer caso 0.29, por lo que la gente" ]
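# Worked check of the min_support reasoning in the comments above: a product bought
# 3 times a day over the 7 days spanned by the 7501 transactions has support
# 3 * 7 / 7501 ≈ 0.0028, hence the rounded min_support = 0.003.
print(3 * 7 / 7501.0)  # 0.002799...

# Inspecting the top record through apyori's actual result types
# (RelationRecord and its OrderedStatistic entries):
first = results[0]
print(first.items)                             # e.g. frozenset({'light cream', 'chicken'})
print(first.ordered_statistics[0].confidence)  # the 0.29 discussed above
print(first.ordered_statistics[0].lift)        # the 4.84 discussed above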
[ "halls, etc), as well as to the user's location.\" 'Queries that are related", "as f: json_data = json.load(f) print(json_data.keys()) # ['domains', 'version'] domains = json_data['domains'] print('domain", "json_data['domains'][3]['name'] 'weather' (Pdb) json_data['domains'][2]['name'] 'transit' (Pdb) json_data['domains'][1]['name'] 'reservation' (Pdb) json_data['domains'][0]['name'] 'places' print(len(domains)) #", "# length (Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys() dict_keys(['text', 'results_per_service']) json_data['domains'][3]['intents'][0]['queries'][0]['text'] print(domains.keys()) # ['description', '@type', 'intents', 'name']", "that are related to transit and navigation.' 'Queries that relate to weather.' (Pdb)", "'weather' (Pdb) json_data['domains'][2]['name'] 'transit' (Pdb) json_data['domains'][1]['name'] 'reservation' (Pdb) json_data['domains'][0]['name'] 'places' print(len(domains)) # 4", "json_data['domains'][3]['intents'][0]['queries'][0].keys() dict_keys(['text', 'results_per_service']) json_data['domains'][3]['intents'][0]['queries'][0]['text'] print(domains.keys()) # ['description', '@type', 'intents', 'name'] \"Queries that are", "in queries: temp['query'] = query['text'] corr_data.append(temp) print(len(corr_data)) corr_data = np.array(corr_data) np.save('benchmark_data.npy', corr_data) \"\"\"", "(Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys() dict_keys(['text', 'results_per_service']) json_data['domains'][3]['intents'][0]['queries'][0]['text'] print(domains.keys()) # ['description', '@type', 'intents', 'name'] \"Queries that", "print(len(domains)) # 4 (Pdb) len(json_data['domains'][0]['intents']) 4 (Pdb) len(json_data['domains'][1]['intents']) 2 (Pdb) len(json_data['domains'][2]['intents']) 3 (Pdb)", "= 'benchmark_data.json' with open(file, 'r') as f: json_data = json.load(f) print(json_data.keys()) # ['domains',", "for query in queries: temp['query'] = query['text'] corr_data.append(temp) print(len(corr_data)) corr_data = np.array(corr_data) np.save('benchmark_data.npy',", "temp['intent'] = intent['name'] queries = intent['queries'] print('query length', len(queries)) for query in queries:", "'transit' (Pdb) json_data['domains'][1]['name'] 'reservation' (Pdb) json_data['domains'][0]['name'] 'places' print(len(domains)) # 4 (Pdb) len(json_data['domains'][0]['intents']) 4", "json_data['domains'] print('domain length', len(domains)) corr_data = [] for domain in domains: temp =", "queries = intent['queries'] print('query length', len(queries)) for query in queries: temp['query'] = query['text']", "import json import numpy as np file = 'benchmark_data.json' with open(file, 'r') as", "domain in domains: temp = {} temp['long_description'] = domain['description'] temp['short_description'] = domain['name'] intents", "user's location.\" 'Queries that are related to reservation.' 'Queries that are related to", "corr_data = np.array(corr_data) np.save('benchmark_data.npy', corr_data) \"\"\" (Pdb) json_data['domains'][3]['intents'][0].keys() dict_keys(['description', 'benchmark', 'queries', 'slots', '@type',", "domains = json_data['domains'] print('domain length', len(domains)) corr_data = [] for domain in domains:", "domain['description'] temp['short_description'] = domain['name'] intents = domain['intents'] print('intent length', len(intents)) for intent in", "are related to transit and navigation.' 'Queries that relate to weather.' 
(Pdb) json_data['domains'][3]['name']", "(Pdb) json_data['domains'][3]['intents'][0].keys() dict_keys(['description', 'benchmark', 'queries', 'slots', '@type', 'name']) len(json_data['domains'][3]['intents'][0]['description']) json_data['domains'][3]['intents'][0]['queries'] # length (Pdb)", "np file = 'benchmark_data.json' with open(file, 'r') as f: json_data = json.load(f) print(json_data.keys())", "intent['name'] queries = intent['queries'] print('query length', len(queries)) for query in queries: temp['query'] =", "[] for domain in domains: temp = {} temp['long_description'] = domain['description'] temp['short_description'] =", "domain['intents'] print('intent length', len(intents)) for intent in intents: temp['intent'] = intent['name'] queries =", "length', len(domains)) corr_data = [] for domain in domains: temp = {} temp['long_description']", "well as to the user's location.\" 'Queries that are related to reservation.' 'Queries", "(Pdb) len(json_data['domains'][0]['intents']) 4 (Pdb) len(json_data['domains'][1]['intents']) 2 (Pdb) len(json_data['domains'][2]['intents']) 3 (Pdb) len(json_data['domains'][3]['intents']) 1 \"\"\"", "'r') as f: json_data = json.load(f) print(json_data.keys()) # ['domains', 'version'] domains = json_data['domains']", "len(queries)) for query in queries: temp['query'] = query['text'] corr_data.append(temp) print(len(corr_data)) corr_data = np.array(corr_data)", "that relate to weather.' (Pdb) json_data['domains'][3]['name'] 'weather' (Pdb) json_data['domains'][2]['name'] 'transit' (Pdb) json_data['domains'][1]['name'] 'reservation'", "intent in intents: temp['intent'] = intent['name'] queries = intent['queries'] print('query length', len(queries)) for", "'@type', 'intents', 'name'] \"Queries that are related to places (restaurants, shops, concert halls,", "that are related to reservation.' 
'Queries that are related to transit and navigation.'", "= domain['description'] temp['short_description'] = domain['name'] intents = domain['intents'] print('intent length', len(intents)) for intent", "= json_data['domains'] print('domain length', len(domains)) corr_data = [] for domain in domains: temp", "len(json_data['domains'][3]['intents'][0]['description']) json_data['domains'][3]['intents'][0]['queries'] # length (Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys() dict_keys(['text', 'results_per_service']) json_data['domains'][3]['intents'][0]['queries'][0]['text'] print(domains.keys()) # ['description', '@type',", "json_data['domains'][3]['intents'][0]['queries'][0]['text'] print(domains.keys()) # ['description', '@type', 'intents', 'name'] \"Queries that are related to places", "print(json_data.keys()) # ['domains', 'version'] domains = json_data['domains'] print('domain length', len(domains)) corr_data = []", "np.array(corr_data) np.save('benchmark_data.npy', corr_data) \"\"\" (Pdb) json_data['domains'][3]['intents'][0].keys() dict_keys(['description', 'benchmark', 'queries', 'slots', '@type', 'name']) len(json_data['domains'][3]['intents'][0]['description'])", "['description', '@type', 'intents', 'name'] \"Queries that are related to places (restaurants, shops, concert", "= [] for domain in domains: temp = {} temp['long_description'] = domain['description'] temp['short_description']", "with open(file, 'r') as f: json_data = json.load(f) print(json_data.keys()) # ['domains', 'version'] domains", "pdb import json import numpy as np file = 'benchmark_data.json' with open(file, 'r')", "= np.array(corr_data) np.save('benchmark_data.npy', corr_data) \"\"\" (Pdb) json_data['domains'][3]['intents'][0].keys() dict_keys(['description', 'benchmark', 'queries', 'slots', '@type', 'name'])", "concert halls, etc), as well as to the user's location.\" 'Queries that are", "location.\" 'Queries that are related to reservation.' 'Queries that are related to transit", "'places' print(len(domains)) # 4 (Pdb) len(json_data['domains'][0]['intents']) 4 (Pdb) len(json_data['domains'][1]['intents']) 2 (Pdb) len(json_data['domains'][2]['intents']) 3", "places (restaurants, shops, concert halls, etc), as well as to the user's location.\"", "dict_keys(['description', 'benchmark', 'queries', 'slots', '@type', 'name']) len(json_data['domains'][3]['intents'][0]['description']) json_data['domains'][3]['intents'][0]['queries'] # length (Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys() dict_keys(['text',", "= domain['name'] intents = domain['intents'] print('intent length', len(intents)) for intent in intents: temp['intent']", "in domains: temp = {} temp['long_description'] = domain['description'] temp['short_description'] = domain['name'] intents =", "['domains', 'version'] domains = json_data['domains'] print('domain length', len(domains)) corr_data = [] for domain", "np.save('benchmark_data.npy', corr_data) \"\"\" (Pdb) json_data['domains'][3]['intents'][0].keys() dict_keys(['description', 'benchmark', 'queries', 'slots', '@type', 'name']) len(json_data['domains'][3]['intents'][0]['description']) json_data['domains'][3]['intents'][0]['queries']", "'Queries that are related to transit and navigation.' 
'Queries that relate to weather.'", "json import numpy as np file = 'benchmark_data.json' with open(file, 'r') as f:", "= json.load(f) print(json_data.keys()) # ['domains', 'version'] domains = json_data['domains'] print('domain length', len(domains)) corr_data", "(Pdb) json_data['domains'][0]['name'] 'places' print(len(domains)) # 4 (Pdb) len(json_data['domains'][0]['intents']) 4 (Pdb) len(json_data['domains'][1]['intents']) 2 (Pdb)", "that are related to places (restaurants, shops, concert halls, etc), as well as", "weather.' (Pdb) json_data['domains'][3]['name'] 'weather' (Pdb) json_data['domains'][2]['name'] 'transit' (Pdb) json_data['domains'][1]['name'] 'reservation' (Pdb) json_data['domains'][0]['name'] 'places'", "intents = domain['intents'] print('intent length', len(intents)) for intent in intents: temp['intent'] = intent['name']", "'name'] \"Queries that are related to places (restaurants, shops, concert halls, etc), as", "are related to reservation.' 'Queries that are related to transit and navigation.' 'Queries", "intent['queries'] print('query length', len(queries)) for query in queries: temp['query'] = query['text'] corr_data.append(temp) print(len(corr_data))", "as to the user's location.\" 'Queries that are related to reservation.' 'Queries that", "'Queries that relate to weather.' (Pdb) json_data['domains'][3]['name'] 'weather' (Pdb) json_data['domains'][2]['name'] 'transit' (Pdb) json_data['domains'][1]['name']", "to places (restaurants, shops, concert halls, etc), as well as to the user's", "import numpy as np file = 'benchmark_data.json' with open(file, 'r') as f: json_data", "as np file = 'benchmark_data.json' with open(file, 'r') as f: json_data = json.load(f)", "json_data['domains'][0]['name'] 'places' print(len(domains)) # 4 (Pdb) len(json_data['domains'][0]['intents']) 4 (Pdb) len(json_data['domains'][1]['intents']) 2 (Pdb) len(json_data['domains'][2]['intents'])", "in intents: temp['intent'] = intent['name'] queries = intent['queries'] print('query length', len(queries)) for query", "'results_per_service']) json_data['domains'][3]['intents'][0]['queries'][0]['text'] print(domains.keys()) # ['description', '@type', 'intents', 'name'] \"Queries that are related to", "file = 'benchmark_data.json' with open(file, 'r') as f: json_data = json.load(f) print(json_data.keys()) #", "domain['name'] intents = domain['intents'] print('intent length', len(intents)) for intent in intents: temp['intent'] =", "'intents', 'name'] \"Queries that are related to places (restaurants, shops, concert halls, etc),", "shops, concert halls, etc), as well as to the user's location.\" 'Queries that", "\"\"\" (Pdb) json_data['domains'][3]['intents'][0].keys() dict_keys(['description', 'benchmark', 'queries', 'slots', '@type', 'name']) len(json_data['domains'][3]['intents'][0]['description']) json_data['domains'][3]['intents'][0]['queries'] # length", "length', len(queries)) for query in queries: temp['query'] = query['text'] corr_data.append(temp) print(len(corr_data)) corr_data =", "navigation.' 'Queries that relate to weather.' 
(Pdb) json_data['domains'][3]['name'] 'weather' (Pdb) json_data['domains'][2]['name'] 'transit' (Pdb)", "(Pdb) json_data['domains'][3]['name'] 'weather' (Pdb) json_data['domains'][2]['name'] 'transit' (Pdb) json_data['domains'][1]['name'] 'reservation' (Pdb) json_data['domains'][0]['name'] 'places' print(len(domains))", "= {} temp['long_description'] = domain['description'] temp['short_description'] = domain['name'] intents = domain['intents'] print('intent length',", "to reservation.' 'Queries that are related to transit and navigation.' 'Queries that relate", "(Pdb) json_data['domains'][2]['name'] 'transit' (Pdb) json_data['domains'][1]['name'] 'reservation' (Pdb) json_data['domains'][0]['name'] 'places' print(len(domains)) # 4 (Pdb)", "numpy as np file = 'benchmark_data.json' with open(file, 'r') as f: json_data =", "'reservation' (Pdb) json_data['domains'][0]['name'] 'places' print(len(domains)) # 4 (Pdb) len(json_data['domains'][0]['intents']) 4 (Pdb) len(json_data['domains'][1]['intents']) 2", "to the user's location.\" 'Queries that are related to reservation.' 'Queries that are", "print('query length', len(queries)) for query in queries: temp['query'] = query['text'] corr_data.append(temp) print(len(corr_data)) corr_data", "len(intents)) for intent in intents: temp['intent'] = intent['name'] queries = intent['queries'] print('query length',", "len(domains)) corr_data = [] for domain in domains: temp = {} temp['long_description'] =", "(Pdb) json_data['domains'][1]['name'] 'reservation' (Pdb) json_data['domains'][0]['name'] 'places' print(len(domains)) # 4 (Pdb) len(json_data['domains'][0]['intents']) 4 (Pdb)", "length', len(intents)) for intent in intents: temp['intent'] = intent['name'] queries = intent['queries'] print('query", "temp['short_description'] = domain['name'] intents = domain['intents'] print('intent length', len(intents)) for intent in intents:", "'benchmark_data.json' with open(file, 'r') as f: json_data = json.load(f) print(json_data.keys()) # ['domains', 'version']", "json.load(f) print(json_data.keys()) # ['domains', 'version'] domains = json_data['domains'] print('domain length', len(domains)) corr_data =", "'queries', 'slots', '@type', 'name']) len(json_data['domains'][3]['intents'][0]['description']) json_data['domains'][3]['intents'][0]['queries'] # length (Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys() dict_keys(['text', 'results_per_service']) json_data['domains'][3]['intents'][0]['queries'][0]['text']", "= domain['intents'] print('intent length', len(intents)) for intent in intents: temp['intent'] = intent['name'] queries", "related to reservation.' 'Queries that are related to transit and navigation.' 'Queries that", "f: json_data = json.load(f) print(json_data.keys()) # ['domains', 'version'] domains = json_data['domains'] print('domain length',", "domains: temp = {} temp['long_description'] = domain['description'] temp['short_description'] = domain['name'] intents = domain['intents']", "open(file, 'r') as f: json_data = json.load(f) print(json_data.keys()) # ['domains', 'version'] domains =", "length (Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys() dict_keys(['text', 'results_per_service']) json_data['domains'][3]['intents'][0]['queries'][0]['text'] print(domains.keys()) # ['description', '@type', 'intents', 'name'] \"Queries", "the user's location.\" 'Queries that are related to reservation.' 
'Queries that are related", "# 4 (Pdb) len(json_data['domains'][0]['intents']) 4 (Pdb) len(json_data['domains'][1]['intents']) 2 (Pdb) len(json_data['domains'][2]['intents']) 3 (Pdb) len(json_data['domains'][3]['intents'])", "print(len(corr_data)) corr_data = np.array(corr_data) np.save('benchmark_data.npy', corr_data) \"\"\" (Pdb) json_data['domains'][3]['intents'][0].keys() dict_keys(['description', 'benchmark', 'queries', 'slots',", "= query['text'] corr_data.append(temp) print(len(corr_data)) corr_data = np.array(corr_data) np.save('benchmark_data.npy', corr_data) \"\"\" (Pdb) json_data['domains'][3]['intents'][0].keys() dict_keys(['description',", "transit and navigation.' 'Queries that relate to weather.' (Pdb) json_data['domains'][3]['name'] 'weather' (Pdb) json_data['domains'][2]['name']", "(restaurants, shops, concert halls, etc), as well as to the user's location.\" 'Queries", "temp = {} temp['long_description'] = domain['description'] temp['short_description'] = domain['name'] intents = domain['intents'] print('intent", "corr_data = [] for domain in domains: temp = {} temp['long_description'] = domain['description']", "{} temp['long_description'] = domain['description'] temp['short_description'] = domain['name'] intents = domain['intents'] print('intent length', len(intents))", "queries: temp['query'] = query['text'] corr_data.append(temp) print(len(corr_data)) corr_data = np.array(corr_data) np.save('benchmark_data.npy', corr_data) \"\"\" (Pdb)", "related to transit and navigation.' 'Queries that relate to weather.' (Pdb) json_data['domains'][3]['name'] 'weather'", "and navigation.' 'Queries that relate to weather.' (Pdb) json_data['domains'][3]['name'] 'weather' (Pdb) json_data['domains'][2]['name'] 'transit'", "= intent['name'] queries = intent['queries'] print('query length', len(queries)) for query in queries: temp['query']", "for intent in intents: temp['intent'] = intent['name'] queries = intent['queries'] print('query length', len(queries))", "4 (Pdb) len(json_data['domains'][0]['intents']) 4 (Pdb) len(json_data['domains'][1]['intents']) 2 (Pdb) len(json_data['domains'][2]['intents']) 3 (Pdb) len(json_data['domains'][3]['intents']) 1", "'version'] domains = json_data['domains'] print('domain length', len(domains)) corr_data = [] for domain in", "relate to weather.' 
(Pdb) json_data['domains'][3]['name'] 'weather' (Pdb) json_data['domains'][2]['name'] 'transit' (Pdb) json_data['domains'][1]['name'] 'reservation' (Pdb)", "temp['query'] = query['text'] corr_data.append(temp) print(len(corr_data)) corr_data = np.array(corr_data) np.save('benchmark_data.npy', corr_data) \"\"\" (Pdb) json_data['domains'][3]['intents'][0].keys()", "= intent['queries'] print('query length', len(queries)) for query in queries: temp['query'] = query['text'] corr_data.append(temp)", "etc), as well as to the user's location.\" 'Queries that are related to", "print('domain length', len(domains)) corr_data = [] for domain in domains: temp = {}", "json_data = json.load(f) print(json_data.keys()) # ['domains', 'version'] domains = json_data['domains'] print('domain length', len(domains))", "for domain in domains: temp = {} temp['long_description'] = domain['description'] temp['short_description'] = domain['name']", "related to places (restaurants, shops, concert halls, etc), as well as to the", "query in queries: temp['query'] = query['text'] corr_data.append(temp) print(len(corr_data)) corr_data = np.array(corr_data) np.save('benchmark_data.npy', corr_data)", "import pdb import json import numpy as np file = 'benchmark_data.json' with open(file,", "to weather.' (Pdb) json_data['domains'][3]['name'] 'weather' (Pdb) json_data['domains'][2]['name'] 'transit' (Pdb) json_data['domains'][1]['name'] 'reservation' (Pdb) json_data['domains'][0]['name']", "corr_data) \"\"\" (Pdb) json_data['domains'][3]['intents'][0].keys() dict_keys(['description', 'benchmark', 'queries', 'slots', '@type', 'name']) len(json_data['domains'][3]['intents'][0]['description']) json_data['domains'][3]['intents'][0]['queries'] #", "intents: temp['intent'] = intent['name'] queries = intent['queries'] print('query length', len(queries)) for query in", "\"Queries that are related to places (restaurants, shops, concert halls, etc), as well", "'name']) len(json_data['domains'][3]['intents'][0]['description']) json_data['domains'][3]['intents'][0]['queries'] # length (Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys() dict_keys(['text', 'results_per_service']) json_data['domains'][3]['intents'][0]['queries'][0]['text'] print(domains.keys()) # ['description',", "reservation.' 'Queries that are related to transit and navigation.' 
'Queries that relate to", "dict_keys(['text', 'results_per_service']) json_data['domains'][3]['intents'][0]['queries'][0]['text'] print(domains.keys()) # ['description', '@type', 'intents', 'name'] \"Queries that are related", "json_data['domains'][3]['intents'][0].keys() dict_keys(['description', 'benchmark', 'queries', 'slots', '@type', 'name']) len(json_data['domains'][3]['intents'][0]['description']) json_data['domains'][3]['intents'][0]['queries'] # length (Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys()", "'slots', '@type', 'name']) len(json_data['domains'][3]['intents'][0]['description']) json_data['domains'][3]['intents'][0]['queries'] # length (Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys() dict_keys(['text', 'results_per_service']) json_data['domains'][3]['intents'][0]['queries'][0]['text'] print(domains.keys())", "as well as to the user's location.\" 'Queries that are related to reservation.'", "'@type', 'name']) len(json_data['domains'][3]['intents'][0]['description']) json_data['domains'][3]['intents'][0]['queries'] # length (Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys() dict_keys(['text', 'results_per_service']) json_data['domains'][3]['intents'][0]['queries'][0]['text'] print(domains.keys()) #", "print('intent length', len(intents)) for intent in intents: temp['intent'] = intent['name'] queries = intent['queries']", "'benchmark', 'queries', 'slots', '@type', 'name']) len(json_data['domains'][3]['intents'][0]['description']) json_data['domains'][3]['intents'][0]['queries'] # length (Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys() dict_keys(['text', 'results_per_service'])", "# ['domains', 'version'] domains = json_data['domains'] print('domain length', len(domains)) corr_data = [] for", "are related to places (restaurants, shops, concert halls, etc), as well as to", "# ['description', '@type', 'intents', 'name'] \"Queries that are related to places (restaurants, shops,", "corr_data.append(temp) print(len(corr_data)) corr_data = np.array(corr_data) np.save('benchmark_data.npy', corr_data) \"\"\" (Pdb) json_data['domains'][3]['intents'][0].keys() dict_keys(['description', 'benchmark', 'queries',", "json_data['domains'][3]['intents'][0]['queries'] # length (Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys() dict_keys(['text', 'results_per_service']) json_data['domains'][3]['intents'][0]['queries'][0]['text'] print(domains.keys()) # ['description', '@type', 'intents',", "'Queries that are related to reservation.' 'Queries that are related to transit and", "to transit and navigation.' 'Queries that relate to weather.' 
(Pdb) json_data['domains'][3]['name'] 'weather' (Pdb)", "json_data['domains'][2]['name'] 'transit' (Pdb) json_data['domains'][1]['name'] 'reservation' (Pdb) json_data['domains'][0]['name'] 'places' print(len(domains)) # 4 (Pdb) len(json_data['domains'][0]['intents'])", "json_data['domains'][1]['name'] 'reservation' (Pdb) json_data['domains'][0]['name'] 'places' print(len(domains)) # 4 (Pdb) len(json_data['domains'][0]['intents']) 4 (Pdb) len(json_data['domains'][1]['intents'])", "query['text'] corr_data.append(temp) print(len(corr_data)) corr_data = np.array(corr_data) np.save('benchmark_data.npy', corr_data) \"\"\" (Pdb) json_data['domains'][3]['intents'][0].keys() dict_keys(['description', 'benchmark',", "print(domains.keys()) # ['description', '@type', 'intents', 'name'] \"Queries that are related to places (restaurants,", "temp['long_description'] = domain['description'] temp['short_description'] = domain['name'] intents = domain['intents'] print('intent length', len(intents)) for" ]
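# A minimal, self-contained sketch of the same domain -> intent -> query
# flattening, showing why each appended row must be a fresh dict. The
# demo_domains structure and the intent name 'find_weather' are illustrative
# assumptions; only the key names mirror benchmark_data.json.
def flatten_domains(domains):
    rows = []
    for domain in domains:
        for intent in domain['intents']:
            for query in intent['queries']:
                rows.append({'long_description': domain['description'],
                             'short_description': domain['name'],
                             'intent': intent['name'],
                             'query': query['text']})
    return rows


demo_domains = [
    {'name': 'weather',
     'description': 'Queries that relate to weather.',
     'intents': [{'name': 'find_weather',  # hypothetical intent name
                  'queries': [{'text': 'will it rain tomorrow'},
                              {'text': 'weather in Paris'}]}]},
]

for row in flatten_domains(demo_domains):
    print(row)  # two distinct dicts, one per query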
[ "curr_worker == len(worker_ids): curr_worker = 0 print('-----------------------------------------------------------------') worker_status_temp = run_dask_tools.get_dask_worker_status(curr_client, custom_index=custom_index_curr) get_worker_free_temp =", "results_dask['energyMM_mem'] out_file = open(out_name, 'w') out_file.write(out_mem) out_file.close() out_name = abs_folder + os.sep +", "test = 1 # This will hold information about run states # self.uber_dock_folder", "return [generate_ga_dat, ] # self.generate_ga_dat_object_file = open(self.generate_ga_dat_name_abs, 'w') # self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template) # self.generate_ga_dat_object_file.close() #", "1 # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type) self.ledock_directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.ledock_folder_name) print('TADA ', self.ledock_directories)", "# json.dump(self.cluster_models, outfile) # pickle.dump(self.cluster_models, open(filename, \"wb\")) # TODO create folder for run", ">>> >>> >>> LasR_MOR_mmpbsa_calc.prepare_g_mmpbsa_dask_protocol(client) >>> >>> >>> LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True) >>> # >>> # LasR_MOR_mmpbsa_calc.run_dask_docking(client)", ">>> EPI_uber_dock.prepare_rdock_settings() Convert with pybel to mol2 for receptor and sd for ligand", "optimz1 = 'OPTIMZ 9999 {0} -1\\n\\n'.format(self.flexaid_res_chain) # Rotational DOF of the ligand (0)", "self.flexaid_absolute_input_folder + os.sep + self.generate_ga_dat self.generate_ga_dat_object_file = open(self.generate_ga_dat_name_abs, 'w') self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template) self.generate_ga_dat_object_file.close() self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}})", "ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #", "############################################################################################### gmmbpsa_min_mem = 1000 retries_num = 2 curr_index = 0 curr_worker = 0", "lines in Processed_files/BTN.inp # In our example, Biotin has 5 flexible bonds flexible_bonds_data", "= [] for i in flexible_bonds_data_text_list: if 'FLEDIH' in i: print(i) temp =", "# submitted_jobs_dask_temp, finished_jobs_temp = self.check_dask_jobs(submitted_jobs_dask,finished_jobs) finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask, finished_jobs, finished_jobs_dict) test =", "anchor dihedral of the ligand # Float in interval [1.0-30.0] VARDIH 5.0 #", "data_pre = {} data_pre.update({'tprName':tpr_filename, 'tprMem':tpr_mem}) data_pre.update({'mdpName':mdp_filename, 'mdpMem':mdp_mem}) data_pre.update({'indexName':index_filename, 'indexMem':index_mem}) self.dask_prep = data_pre for", "= 1 # # test = 1 ################################################################################################### test = 1 #################################################################################################### self.run_mmpbsa_dask", "BOOMFRAC 1.0 # Number of new individuals to generate at each generation #", "# Upload files to all clients client.upload_file # task = client.submit(run_dask_tools.upload_g_mmpbsa_files_dask, # big_future,", "18(10); 1365-1373 VCTPLA R # Use normalized surfaces in contacts NORMAR # Define", "Software for molecular docking: a review # This will be for leDock #", "distribution. 
# * Neither the name of the molmolpy Developers nor the names", "''' try: if self.setup_ledock_pameters is not False: # print(\"Running Vina\") # TODO need", "0: curr_index = 0 else: curr_index -= 1 pop_item.update({'workingDir':workstation_dir}) submitted_jobs.append(pop_item) # MAYBE CHECK", "= {} worker_status_free = None test = 1 # maybe 2 async threads,", "self.load_state_called = False self.ledock_title = self.receptor_name + '_' + self.ligand_name + '_LeDock Parameter", "in the target (water,metal,modified amino acids,cofactors,ligands) # To exclude these groups, uncomment the", "and chain A # Translational DOF of the ligand (-1) optimz1 = 'OPTIMZ", "dir_name=self.run_type) self.ledock_directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.ledock_folder_name) print('TADA ', self.ledock_directories) test = 1 # This", "self.molecule_name + '_' + self.run_type_samples + '.json' # This will hold information about", "first_index, second_index, molname='Unknown', receptor_name='Unknown', folder_path='.', job_name = 'Unknown', load_state_file=None): self.load_state_file = load_state_file if", "1 #self.g_mmpbsa_sim_states = self.state_data['energySoftware']['g_mmpbsa']['simStates'] #self.ledock_samples = self.state_data['energySoftware']['g_mmpbsa']['LeDockSample_list'] # Divide trajectory to number of", "# -*- coding: utf-8 -*- # !/usr/bin/env python # # @file __init__.py #", "self.dask_prep # run_dask_tools.upload_g_mmpbsa_files_dask(big_future) #TODO # Scatter a lot better using scatter for big", "len(worker_ids): curr_worker = 0 print('-----------------------------------------------------------------') worker_status_temp = run_dask_tools.get_dask_worker_status(curr_client, custom_index=custom_index_curr) get_worker_free_temp = run_dask_tools.check_free_resources(worker_status_temp) custom_index_curr", "= 0 curr_worker = 0 # prepare worker ids for easier switch worker_ids", "curr_client = client worker_status = run_dask_tools.get_dask_worker_status(curr_client) get_worker_free = run_dask_tools.check_free_resources(worker_status) import copy original_get_worker_free =", "id}) custom_index_curr = 3 while len(queue_jobs) > 0: if curr_index == len(queue_jobs): curr_index", "self.rdock_title = self.ledock_data['LeDock_params']['title'] # # self.receptor_file_ledock = self.ledock_data['LeDock_params']['receptorFile'] # self.ledock_rmsd = self.ledock_data['LeDock_params']['LeDockRMSD'] #", "out_file.write(out_mem) out_file.close() out_name = abs_folder + os.sep + results_dask['polar_filename'] out_mem = results_dask['polar_mem'] out_file", "and Extended radical plane # See McConkey et al. (2002) Bioinformatics. 
import os
import sys
import time
import copy
import json
import multiprocessing

import numpy as np
import mdtraj as md

from molmolpy.utils.cluster_quality import *
from molmolpy.utils import folder_utils
from molmolpy.utils import helper as hlp
from molmolpy.tools import run_dask_tools

low_seed = 1
high_seed = 999999999

mgltools_utilities = '/home/john1990/MGLTools-1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24'


class GMMPBSAObject(object):
    """
    Create MoleculeObject by parsing pdb or pdbqt formats.
    Then converts to pandas dataframe.

    Read more in the :ref:`User Guide <MoleculeObject>`.

    Parameters
    ----------
    filename : str, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.

    Usage example

    >>> EPI_folder = '/media/Work/MEGA/Programming/StressHormones/'
    >>> receptor_file = EPI_folder + os.sep + 'centroid_model_clust2.pdbqt'
    >>> ligand_file = EPI_folder + os.sep + ...
    >>> molname = 'EPI'
    >>> receptor_name = 'LasR'
    >>> run_type = 'vina_sample'
    >>>
    >>> EPI_uber_dock = uber_docker.UberDockerObject(receptor_file, ligand_file, '.', molname=molname, receptor_name=receptor_name)
    >>>
    >>> EPI_uber_dock.prepare_uber_dock_protocol()
    >>> EPI_uber_dock.run_uber_dock_protocol()

    Use together
    >>> self.prepare_uber_dock_protocol() for preparation
    >>> self.run_uber_dock_protocol()

    or separately
    >>> EPI_uber_dock.calculate_max_radius_from_com()
    >>> EPI_uber_dock.calculate_cube_edges()
    >>> EPI_uber_dock.calculate_box_edges_from_com()
    >>>
    >>> EPI_uber_dock.prepare_uber_docker()
    >>>
    >>> # This is for rDock, and it works so comment this part for a while
    >>> EPI_uber_dock.prepare_rdock_settings()
    >>> EPI_uber_dock.generate_rdock_cavity()
    >>> # Prepare and run Dock programs
    >>> EPI_uber_dock.prep_rDock_dock_run_commands()
    >>> EPI_uber_dock.run_rDock_simulation(parallel=True, waitTime=15)
    >>>
    >>> # This is for Autodock vina
    >>> EPI_uber_dock.set_up_Vina_Box()
    >>> EPI_uber_dock.prepare_Vina_run()
    >>>
    >>> LasR_MOR_mmpbsa_calc = GMMPBSAObject(traj, topol_file, tpr_file, mdp_file, index_file,
    ...                                      first_index, second_index, molname, receptor_name)
    >>>
    >>> LasR_MOR_mmpbsa_calc.prepare_g_mmpbsa_dask_protocol(client)
    >>> LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True)
    >>> # LasR_MOR_mmpbsa_calc.run_dask_docking(client)

    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.

    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n). Sparse neighborhoods can
    be precomputed using :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
    ``mode='distance'``.

    References
    ----------
    # ...
    """

    def __init__(self, traj, topol, tpr_file, mdp_file, index_file,
                 first_index, second_index,
                 molname='Unknown',
                 receptor_name='Unknown',
                 folder_path='.',
                 job_name='Unknown',
                 load_state_file=None):

        self.load_state_file = load_state_file

        if load_state_file is not None:
            self.load_state_data_json(self.load_state_file)
        else:
            print('G_MMPBSA Object has been created')

            self.trajectory_file = traj
            self.topology_file = topol

            self.tpr_file = tpr_file
            self.mdp_file = mdp_file
            self.index_file = index_file

            self.first_index = first_index
            self.second_index = second_index

            self.prep_g_mmpbsa_run = False
            self.folder_exists = False

            # Running vina, whether it's for exhaustiveness or traditional run
            self.folder_path = folder_path

            self.command_run_list = []
            self.command_samples_run_list = []

            self.molecule_name = molname
            self.ligand_name = molname
            self.receptor_name = receptor_name

            self.run_type = 'g_mmpbsa'

            self.state_data = {}
            self.state_data_samples = {}

            self.g_mmpbsa_run_finished = False
            self.g_mmpbsa_prepared = False
            self.objects_loaded = False
            # ...

    def set_mgltools_path(self, path):
        print('MGLTools path is set to ', path)
        self.mgltools_utilities = path

    def set_flexaid_path(self, path):
        print('FlexAid path is set to ', path)
        self.flexaid_path = path

    def set_ledock_path(self, path):
        print('LeDock path is set to ', path)
        self.ledock_path = path

    def prep_mdtraj_object(self):
        '''
        Prepare receptor mdtraj object

        get mdtraj topology and save as pandas dataframe

        :return:
        '''
        self.trajectory_mdtraj = md.load_xtc(self.trajectory_file, top=self.topology_file)

        self.trajectory_mdtraj_topology = self.trajectory_mdtraj.topology
        self.trajectory_mdtraj_topology_dataframe = self.trajectory_mdtraj.topology.to_dataframe()

        self.objects_loaded = True
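    # A minimal sketch (not from the original source) of the trajectory
    # chunking that prepare_g_mmpbsa_dask_protocol applies below, reduced to
    # plain mdtraj. The file names 'traj.xtc', 'topol.pdb', 'part_k.xtc' and
    # the chunk count are illustrative assumptions only:
    #
    #   import math
    #   import mdtraj as md
    #
    #   traj = md.load_xtc('traj.xtc', top='topol.pdb')
    #   n_chunks = 4  # e.g. one chunk per free worker core
    #   size = math.ceil(len(traj) / n_chunks)
    #   for k, start in enumerate(range(0, len(traj), size)):
    #       traj[start:start + size].save('part_{0}.xtc'.format(k))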
    def save_state_data_json(self, filedata=None, filename=None):
        '''
        :param filename: Saves state file
        :return:
        '''
        # import json
        # with open(filename, 'w') as outfile:
        #     json.dump(self.cluster_models, outfile)
        # pickle.dump(self.cluster_models, open(filename, "wb"))

        # TODO create folder for run saving state run
        if filename is None and filedata is None:
            # filename = self.json_state_file
            filename = self.absolute_json_state_file
            filedata = self.state_data
        # elif filedata is not None:
        #     filedata = filedata

        json.dump(filedata, open(filename, 'w'), indent=4)

    def load_state_data_json(self, filename):
        '''
        :param filename: load json state data
        :return:
        '''
        # self.absolute_path = os.path.abspath(filename)
        self.load_state_called = True

        print(os.path.abspath(__file__))
        self.state_data = json.load(open(filename, "r"))
        # os.chdir('HSL_exhaustiveness')

        self.trajectory_file = self.state_data['trajectoryFile']
        self.mdp_file = self.state_data['mdpFile']
        self.tpr_file = self.state_data['tprFile']
        self.index_file = self.state_data['indexFile']

        self.run_type = self.state_data['runType']
        self.molecule_name = self.state_data['molName']
        self.receptor_name = self.state_data['receptorName']

        # TODO test
        self.sim_folder_run = self.state_data['simRunFolder']  # .split('/')[-1]
        self.directories = self.state_data['directory']
        self.folder_exists = self.state_data['folderCreated']

        self.absolute_json_state_file = self.state_data['absoluteJsonStates']
        self.g_mmpbsa_folder = self.state_data['RunFolder']
        self.json_state_file = self.state_data['jsonStates']

        # self.g_mmpbsa_sim_states = self.state_data['energySoftware']['g_mmpbsa']['simStates']
        # self.ledock_samples = self.state_data['energySoftware']['g_mmpbsa']['LeDockSample_list']

    def load_samples_state_data_json(self, filename):
        '''
        :param filename: load json state data
        :return:
        '''
        # self.absolute_path = os.path.abspath(filename)
        self.load_state_called_samples = True

        print(os.path.abspath(__file__))
        self.state_data_samples = json.load(open(filename, "r"))

        # ...
        self.ligand_file = self.state_data_samples['ligandFile']
        self.exhaustiveness = self.state_data_samples['exhaustivenessList']
        self.samples_run = self.state_data_samples['samplesList']
        self.folder_path = self.state_data_samples['folderPath']
        self.run_type = self.state_data_samples['runType']
        self.molecule_name = self.state_data_samples['molName']
        self.receptor_name = self.state_data_samples['receptorName']
        # ...
        self.x_center = self.state_data_samples['boxSettings']['center_x']
        self.y_center = self.state_data_samples['boxSettings']['center_y']
        self.z_center = self.state_data_samples['boxSettings']['center_z']
        self.x_size = self.state_data_samples['boxSettings']['size_x']
        self.y_size = self.state_data_samples['boxSettings']['size_y']
        self.z_size = self.state_data_samples['boxSettings']['size_z']
        self.num_modes = self.state_data_samples['boxSettings']['numModes']

    def hold_nSec(self, n):
        for i in range(1, n + 1):
            print(i)
            time.sleep(1)

    def find_sample_files(self, folder):
        try:
            VIP = []
            for dirname, dirnames, filenames in os.walk(folder):
                for i in filenames:
                    # print i
                    if 'out' in i:
                        VIP.append(i)
                    # This is not necessary since info is inside pdbqt file
                    # elif 'vina_sample_' in i:
                    #     VIP.append(i)
            return VIP
        except Exception as e:
            print("error in find_sample_files: ", e)
            sys.exit(0)

    def find_sample_folders(self, folder_path='.', dir_name='vina_sample'):
        try:
            dir_names = []
            for dirname, dirnames, filenames in os.walk(folder_path):
                # print(dirname, '-')
                if dir_name in dirname:
                    # print(dir_name)
                    dir_names.append(dirname)
            # print sorted(dir_names)
            return sorted(dir_names)
        except Exception as e:
            print("Problem with finding folders : ", e)
            sys.exit(0)

    def get_uber_g_mmpbsa_run_folder_name(self):
        curr_folder = os.getcwd()
        return curr_folder + os.sep + self.run_folder_name

    def get_samples_run_folder_name(self):
        curr_folder = os.getcwd()
        # ...
        return curr_folder + os.sep + self.run_folder_name_samples

    def prepare_g_mmpbsa(self):
        '''
        Prepare g_mmpbsa run folder and initial json configuration
        :return:
        '''
        self.run_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type
        self.sim_folder_run = self.folder_path + os.sep + self.run_folder_name  # Create folder don't forget

        self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)

        self.json_state_file = self.sim_folder_run + os.sep + \
            self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json'

        # This will hold information about run states
        self.g_mmpbsa_folder = self.get_uber_g_mmpbsa_run_folder_name()
        self.absolute_json_state_file = self.g_mmpbsa_folder + os.sep + \
            self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json'

        if len(self.directories) == 0:
            print('Creating folder for g_mmpbsa run\n')
            print(self.sim_folder_run)
            folder_utils.create_folder(self.sim_folder_run)
            self.folder_exists = True

            programs_dict = {'energySoftware': {'g_mmpbsa': {}}}

            self.state_data.update({'trajectoryFile': self.trajectory_file,
                                    'mdpFile': self.mdp_file,
                                    'tprFile': self.tpr_file,
                                    'indexFile': self.index_file,
                                    'runFolderName': self.run_folder_name,
                                    'folderPath': self.folder_path,
                                    'jsonStates': self.json_state_file,
                                    'runType': self.run_type,
                                    'molName': self.molecule_name,
                                    'receptorName': self.receptor_name,
                                    'simRunFolder': self.sim_folder_run,
                                    'RunFolder': self.g_mmpbsa_folder,
                                    'absoluteJsonStates': self.absolute_json_state_file,
                                    'directory': self.directories,
                                    'folderCreated': self.folder_exists,
                                    'simStates': {}})

            self.state_data.update(programs_dict)

            # self.prepVinaSim_exhaust()
            self.save_state_data_json()

            self.load_state_called = False
        else:
            self.load_state_file = self.json_state_file
            self.load_state_called = True
            self.load_state_data_json(self.load_state_file)

    ####################################################################################################################
    def prepare_samples_collection_run(self, standard_exhaust=128,
                                       num_samples_run=100,
                                       run_type='samples_run'):
        if self.setup_box is False:
            print('Please setup simulation box')
            sys.exit(0)

        self.run_type_samples = run_type
        self.prep_samples_run = True

        self.samples_exhaust = standard_exhaust
        self.samples_run = list(range(1, num_samples_run + 1))

        # Create folder don't forget
        # Exhaustiveness for all samples
        self.sim_folder_run_samples = self.folder_path + os.sep + self.run_folder_name_samples

        # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
        self.directories_samples = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name_samples)
        print('TADA ', self.directories_samples)

        # This will hold information about run states
        self.json_samples_state_file = self.sim_folder_run_samples + os.sep + \
            self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples + '.json'

        if len(self.directories_samples) == 0:
            # ...
            self.state_data_samples.update({'exhaustivenessList': self.exhaustiveness,
                                            'samples_exhaust': self.samples_exhaust,
                                            'samplesList': self.samples_run,
                                            'folderPath': self.folder_path,
                                            'runType': self.run_type_samples,
                                            'molName': self.molecule_name,
                                            'receptorName': self.receptor_name,
                                            'directory': self.directories_samples,
                                            'setup': self.setup_box,
                                            'folderCreated': self.folder_exists_samples,
                                            'simStates': {}})

            self.prepVinaSim_samples()
            self.save_state_data_json(filedata=self.state_data_samples, filename=self.json_samples_state_file)

            self.load_state_called_samples = False
            self.prep_sample_run = True
        else:
            self.load_state_file_samples = self.json_samples_state_file
            self.load_state_called_samples = True
            self.load_samples_state_data_json(self.load_state_file_samples)
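    # A minimal sketch of the JSON state round-trip that
    # save_state_data_json/load_state_data_json implement above, assuming only
    # the standard library; the file name 'state.json' is an illustrative
    # assumption:
    #
    #   import json
    #
    #   state = {'runType': 'g_mmpbsa', 'folderCreated': True, 'simStates': {}}
    #   with open('state.json', 'w') as f:
    #       json.dump(state, f, indent=4)
    #   with open('state.json', 'r') as f:
    #       restored = json.load(f)
    #   assert restored == state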
    def prepare_g_mmpbsa_dask_protocol(self, dask_client=None, prep_g_mmpbsa=True):
        '''
        prepare dask tasks for g_mmpbsa
        :return:
        '''
        self.prepare_g_mmpbsa()

        curr_client = dask_client

        worker_status = run_dask_tools.get_dask_worker_status(curr_client)
        get_worker_free = run_dask_tools.check_free_resources(worker_status)

        import copy
        self.original_get_worker_free = copy.deepcopy(get_worker_free)

        total_free_cores = 0
        for worker in get_worker_free:
            preped = get_worker_free[worker]['preped']
            total_free_cores += preped['freeCores']

        if prep_g_mmpbsa is False:
            print('prep gmmpbsa ', prep_g_mmpbsa)
            return 'Do not prepare run files'

        if self.g_mmpbsa_prepared is True:
            print('Do not prep files')
            return 'Do not prep files'

        # Divide trajectory to number of free cores
        traj_len = len(self.trajectory_mdtraj)

        import math
        # Free core approach
        div_traj = math.ceil(traj_len / total_free_cores)

        select_frames = list(range(0, traj_len, div_traj))
        select_indexes = list(range(len(select_frames)))

        folder_to_save = self.g_mmpbsa_folder

        temp_mdtraj = []
        temp_mdtraj_indexes = []
        file_save_list = []
        abs_file_save_list = []

        simStates = {'simStates': {}}

        for i, traj in zip(select_indexes, select_frames):
            temp_state = {str(i): {}}

            temp_traj = self.trajectory_mdtraj[traj:traj + div_traj]
            temp_mdtraj.append(temp_traj)
            temp_mdtraj_indexes.append(i)

            file_save = 'traj_part{0}.xtc'.format(i)
            abs_file_save = folder_to_save + os.sep + file_save
            file_save_list.append(file_save)
            abs_file_save_list.append(abs_file_save)

            temp_state[str(i)].update({'runFinished': False,
                                       'index': i,
                                       'absFolder': folder_to_save,
                                       'fileSave': file_save,
                                       'absFileSave': abs_file_save,
                                       'firstIndex': self.first_index,
                                       'secondIndex': self.second_index,
                                       'indexFile': self.index_file,
                                       'mdpFile': self.mdp_file,
                                       'tprFile': self.tpr_file})

            energy_mm = 'energy_MM_{0}.xvg'.format(i)
            polar = 'polar_{0}.xvg'.format(i)
            apolar = 'apolar_{0}.xvg'.format(i)
            contrib_mm = 'contrib_MM_{0}.dat'.format(i)
            contrib_pol = 'contrib_pol_{0}.dat'.format(i)
            contrib_apol = 'contrib_apol_{0}.dat'.format(i)

            temp_state[str(i)].update({'energyMM': energy_mm,
                                       'polar': polar,
                                       'apolar': apolar,
                                       'contrib_MM': contrib_mm,
                                       'contrib_pol': contrib_pol,
                                       'contrib_apol': contrib_apol})

            temp_traj.save(abs_file_save)
            temp_state[str(i)].update({'fileSaved': True})

            simStates['simStates'].update(temp_state)

        self.mdtraj_parts = temp_mdtraj_indexes
        self.file_save_list = file_save_list
        self.abs_file_save_list = abs_file_save_list

        # self.state_data['energySoftware']['g_mmpbsa'].update({'prepare': self.g_mmpbsa_prepared})
        # self.state_data['energySoftware']['g_mmpbsa'].update({'parts': self.mdtraj_parts})
        # self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list})
        # self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list})
        self.state_data['energySoftware']['g_mmpbsa'].update(simStates)
        # ...

        # Read the shared input files once so they can be shipped to workers
        full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa']

        tpr_abs = full_g_mmpbsa_data['tprFile']
        tpr_file = open(tpr_abs, 'rb')
        tpr_mem = tpr_file.read()
        tpr_filename = tpr_abs.split(os.sep)[-1]

        mdp_abs = full_g_mmpbsa_data['mdpFile']
        mdp_file = open(mdp_abs, 'r')
        mdp_mem = mdp_file.read()
        mdp_filename = mdp_abs.split(os.sep)[-1]

        index_abs = full_g_mmpbsa_data['indexFile']
        index_file = open(index_abs, 'r')
        index_mem = index_file.read()
        index_filename = index_abs.split(os.sep)[-1]

        data_pre = {}
        data_pre.update({'tprName': tpr_filename, 'tprMem': tpr_mem})
        data_pre.update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
        data_pre.update({'indexName': index_filename, 'indexMem': index_mem})

        self.dask_prep = data_pre

        tasks_upload = []
        # big_future = client.scatter(self.dask_prep, broadcast=True)
        # for worker in get_worker_free:
        #     worker_info = get_worker_free[worker]
        #     worker_address = ...

        big_future = self.dask_prep
        # run_dask_tools.upload_g_mmpbsa_files_dask(big_future)
        # TODO Scatter works a lot better: use scatter for big files to upload G_MMPBSA files
        # Upload files to all clients: client.upload_file
        # task = client.submit(run_dask_tools.upload_g_mmpbsa_files_dask,
        #                      big_future,
        #                      ...)
        # ...

        self.g_mmpbsa_prepared = True

    def check_dask_jobs(self, submitted_jobs_dask, finished_jobs, finished_jobs_dict):
        import copy
        # modified_submitted_jobs_dask = copy.deepcopy(submitted_jobs_dask)
        for i, job in enumerate(submitted_jobs_dask):
            # pop_item = modified_submitted_jobs_dask.pop(i)
            try:
                if finished_jobs_dict[i] is True:
                    continue
            except Exception as e:
                pass

            if job.status == 'finished':
                prog = 'g_mmpbsa'
                results = job.result()

                for key in results:
                    sample_num = key
                    update_results = results

                    self.state_data['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]
                    self.before_dask['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]

                    results_dask = results[key]['dask']
                    original_data = self.state_data['energySoftware'][prog]
                    abs_folder = self.g_mmpbsa_folder  # original_data['AbsFolder']

                    # Write every output buffer that came back through dask to
                    # disk; the same pattern repeats for each output file.
                    out_name = abs_folder + os.sep + results_dask['out_filename']
                    out_mem = results_dask['out_mem']
                    out_file = open(out_name, 'w')
                    out_file.write(out_mem)
                    out_file.close()

                    out_name = abs_folder + os.sep + results_dask['energyMM_filename']
                    out_mem = results_dask['energyMM_mem']
                    out_file = open(out_name, 'w')
                    out_file.write(out_mem)
                    out_file.close()

                    out_name = abs_folder + os.sep + results_dask['polar_filename']
                    out_mem = results_dask['polar_mem']
                    out_file = open(out_name, 'w')
                    out_file.write(out_mem)
                    out_file.close()

                    out_name = abs_folder + os.sep + results_dask['apolar_filename']
                    out_mem = results_dask['apolar_mem']
                    out_file = open(out_name, 'w')
                    out_file.write(out_mem)
                    out_file.close()

                    out_name = abs_folder + os.sep + results_dask['contrib_MM_filename']
                    out_mem = results_dask['contrib_MM_mem']
                    out_file = open(out_name, 'w')
                    out_file.write(out_mem)
                    out_file.close()

                    out_name = abs_folder + os.sep + results_dask['contrib_pol_filename']
                    out_mem = results_dask['contrib_pol_mem']
                    out_file = open(out_name, 'w')
                    out_file.write(out_mem)
                    out_file.close()

                    out_name = abs_folder + os.sep + results_dask['contrib_apol_filename']
                    out_mem = results_dask['contrib_apol_mem']
                    out_file = open(out_name, 'w')
                    out_file.write(out_mem)
                    out_file.close()

                    # out_pdbqt_filename = out_pdbqt_name
                    # self.state_data['dockSoftware'][prog]['simStates'][str(sample_num)] = results[key]

                self.save_state_data_json(filedata=self.before_dask, filename=self.absolute_json_state_file)

                # allow CPU to cool down
                # self.hold_nSec(5)

                finished_jobs.append(job)
                finished_jobs_dict.update({i: True})

        print('---' * 10)
        return finished_jobs, finished_jobs_dict
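    # A minimal stand-alone sketch of the future-polling pattern that
    # check_dask_jobs relies on, assuming dask.distributed; the square() task
    # and the in-process client are illustrative assumptions:
    #
    #   from dask.distributed import Client
    #
    #   def square(x):
    #       return x * x
    #
    #   client = Client(processes=False)
    #   futures = [client.submit(square, i, retries=2) for i in range(4)]
    #   done = {}
    #   while len(done) < len(futures):
    #       for i, fut in enumerate(futures):
    #           if i not in done and fut.status == 'finished':
    #               done[i] = fut.result()
    #   client.close()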
Parameters ---------- filename : str, optional The maximum distance", "+ self.receptor_name + '-' + self.ligand_name + '.dat' generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep", "'commandToClean':command_to_clean, 'dokFileName':ligand_clear_dok, 'runFinished': False}} self.LeDock_sim_states.update(temp_dict) self.state_data['dockSoftware']['LeDock']['simStates'].update(temp_dict) # try: # os.system(command_to_run) # except KeyboardInterrupt:", "SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED", "self.setup_ledock_pameters is not False: # print(\"Running Vina\") # TODO need to think about", "out_mem = results_dask['polar_mem'] out_file = open(out_name, 'w') out_file.write(out_mem) out_file.close() out_name = abs_folder +", "Defines the grid spacing of the binding-site # Float in interval [0.1,1.0] SPACER", "preped['freeCores'] if prep_g_mmpbsa is False: print('prep gmmpbsa ', prep_g_mmpbsa) return 'Do not prepare", "file :return: ''' # import json # with open(filename, 'w') as outfile: #", "''' prepare dask tasks for g_mmpbsa :return: ''' self.prepare_g_mmpbsa() test = 1 curr_client", "is True: print('Do not prep files') return 'Do not prep files' traj_len =", "for exhaustiveness or traditional run self.folder_path = folder_path self.command_run_list = [] self.command_samples_run_list =", "test = 1 # pop_item = modified_submitted_jobs_dask.pop(i) try: if finished_jobs_dict[i] is True: continue", "'absoluteJsonStates': self.absolute_json_state_file, 'directory': self.directories, 'folderCreated': self.folder_exists, 'simStates': {}}) self.state_data.update(programs_dict) # self.prepVinaSim_exhaust() self.save_state_data_json() self.load_state_called", "= first_index self.second_index = second_index self.prep_g_mmpbsa_run = False self.folder_exists = False # Running", "This way folder is buggy workstation_dir = original_get_worker_free[curr_worker_id]['preped']['workerDir'] workstation_freemem = workstation_preped_temp['freeMemory'] workstation_freecpu =", "# print i if 'out' in i: VIP.append(i) # This is not necessary", "job_quantity: finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask, finished_jobs, finished_jobs_dict) time.sleep(60) print('->' * 10) print('Everything is", "curr_index -= 1 pop_item.update({'workingDir':workstation_dir}) submitted_jobs.append(pop_item) # MAYBE CHECK FOLDER HERE # #big_future =", "target (water,metal,modified amino acids,cofactors,ligands) # To exclude these groups, uncomment the next line", "if self.setup_ledock_pameters is not False: # print(\"Running Vina\") # TODO need to think", "', prep_g_mmpbsa) return 'Do not prepare run files' if self.g_mmpbsa_prepared is True: print('Do", "files') return 'Do not prep files' traj_len = len(self.trajectory_mdtraj) import math # Free", "os.sep + results_dask['contrib_pol_filename'] out_mem = results_dask['contrib_pol_mem'] out_file = open(out_name, 'w') out_file.write(out_mem) out_file.close() #", "= 1000 retries_num = 2 curr_index = 0 curr_worker = 0 # prepare", "= open(out_name, 'w') out_file.write(out_mem) out_file.close() out_name = abs_folder + os.sep + results_dask['energyMM_filename'] out_mem", "self.folder_exists_samples, 'simStates': {}}) self.prepVinaSim_samples() self.save_state_data_json(filedata=self.state_data_samples, filename=self.json_samples_state_file) self.load_state_called_samples = False self.prep_sample_run = True else:", "self.ledock_zmin = self.ledock_data['LeDock_params']['zmin'] # self.ledock_zmax = 
self.ledock_data['LeDock_params']['zmax'] # # except: # print('LeDock_params is", "complexity O(n). Sparse neighborhoods can be precomputed using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``. References", "self.state_data_samples['boxSettings']['center_z'] self.x_size = self.state_data_samples['boxSettings']['size_x'] self.y_size = self.state_data_samples['boxSettings']['size_y'] self.z_size = self.state_data_samples['boxSettings']['size_z'] self.num_modes = self.state_data_samples['boxSettings']['numModes']", "= run_g_mmpbsa curr_LeDock = 0 # very slow # while len(run_docking_queue) != 40:", "self.ledock_ymax = self.ledock_data['LeDock_params']['ymax'] # self.ledock_zmin = self.ledock_data['LeDock_params']['zmin'] # self.ledock_zmax = self.ledock_data['LeDock_params']['zmax'] # #", "'r') index_mem = index_file.read() index_filename = index_abs.split(os.sep)[-1] # data_pre = self.state_data['energySoftware']['g_mmpbsa'] # data_pre.update({'dask':", "to endorse or promote products derived # from this software without specific prior", "will be for leDock # if prep_g_mmpbsa is True: # # self.prepare_uber_docker() #", "sd for ligand :return: ''' # self.output_receptor_rdock = Outputfile(\"mol2\", \"{0}.mol2\".format(self.receptor_name)) # self.output_receptor_rdock.write(self.receptor_pybel) #", "test = 1 # data['dask'].update({'cavFile':cav_file_mem }) # self.state_data['dockSoftware']['LeDock']['simStates'][str(LeDock_sample_num)] = data test = 1", "run states self.g_mmpbsa_folder = self.get_uber_g_mmpbsa_run_folder_name() self.absolute_json_state_file = self.g_mmpbsa_folder + os.sep + self.receptor_name +", "interval [1,N] where N is NUMCHROM PRINTCHR 10 ''' generate_ga_dat = 'ga_inp_' +", "more in the :ref:`User Guide <MoleculeObject>`. Parameters ---------- filename : str, optional The", "= False else: self.load_state_file = self.json_state_file self.load_state_called = True self.load_state_data_json(self.load_state_file) def prepare_ledock_settings(self): '''", "''' Prepare receptor mdtraj object get mdtraj topology and save as pandas dataframe", "self.save_run_name = \"ledock_{0}_sample_{1}\".format(self.run_type, sample_num) random_seed = np.random.randint(low_seed, high_seed) command_to_run = \"{0} {1}\".format(command_receptor, parm_name)", "= \"ledock_{0}_sample_{1}\".format(self.run_type, sample_num) random_seed = np.random.randint(low_seed, high_seed) command_to_run = \"{0} {1}\".format(command_receptor, parm_name) ligand_clear_dok", "other materials provided # with the distribution. # * Neither the name of", "get_worker_free: # preped = get_worker_free[worker]['preped'] # total_free_cores += preped['freeCores'] if prep_g_mmpbsa is False:", "part needs clarification self.prep_mdtraj_object() # original data before transformation # Add receptor name", "derived # from this software without specific prior written permission. 
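Several fragments load the trajectory with mdtraj (`md.load_xtc(self.trajectory_file, top=self.topology_file)`) and flatten its topology into a pandas DataFrame. A small sketch of that preparation step, assuming mdtraj is installed; the file paths are placeholders:

```python
import mdtraj as md

def prep_mdtraj_object(trajectory_file, topology_file):
    """Load an XTC trajectory plus its topology, and expose the topology
    as a pandas DataFrame (to_dataframe() returns (atoms_df, bonds))."""
    traj = md.load_xtc(trajectory_file, top=topology_file)
    atoms_df, bonds = traj.topology.to_dataframe()
    return traj, traj.topology, atoms_df

# Usage with placeholder paths:
# traj, topology, atoms_df = prep_mdtraj_object("traj.xtc", "topol.pdb")
# print(len(traj), atoms_df.head())
```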
# THIS SOFTWARE", "== 0: print('Creating folder for g_mmpbsa run\\n') print(self.sim_folder_run) folder_utils.create_folder(self.sim_folder_run) self.folder_exists = True programs_dict", "= self.ledock_data['paramFull'] # except: # print('LeDock setting part is empty verify yolo') #", "= 'RMSDST ' + '{0}_ref.pdb\\n\\n'.format( self.ligand_flexaid_initials) final_str += rmsdst final_str += flexaid_config_input_template generate_config_input_file", "= self.absolute_json_state_file filedata = self.state_data # elif filedata is not None: # filedata", "[] file_save_list = [] abs_file_save_list = [] simStates = {'simStates':{}} for i,traj in", "materials provided # with the distribution. # * Neither the name of the", "extra line for each flexible bond of the ligand # The allowable flexible", "run_type='samples_run'): if self.setup_box is False: print('Please setup simulation box') sys.exit(0) self.run_type_samples = run_type", "filename : str, optional The maximum distance between two samples for them to", "LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True) >>> # >>> # LasR_MOR_mmpbsa_calc.run_dask_docking(client) Notes ----- See examples/cluster/plot_dbscan.py for an example.", "KeyboardInterrupt: # # quit # sys.exit() print(\"LeDock command generation finished\") else: print('Please setup", "return 'Do not prep files' traj_len = len(self.trajectory_mdtraj) import math # Free core", "\"r\")) # os.chdir('HSL_exhaustiveness') self.trajectory_file = self.state_data['trajectoryFile'] self.mdp_file = self.state_data['mdpFile'] self.tpr_file = self.state_data['tprFile'] self.index_file", "# self.generate_ga_dat_object_file.close() # # self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}}) # # self.state_data['dockSoftware']['FlexAid']['GA_params'].update( # {'generateGA_param': self.generate_ga_dat_pameters, #", "open(filename, 'w') as outfile: # json.dump(self.cluster_models, outfile) # pickle.dump(self.cluster_models, open(filename, \"wb\")) # TODO", "= 16 # Production # worker_status = run_dask_tools.get_dask_worker_status(curr_client) # # get_worker_free = run_dask_tools.check_free_resources(worker_status)", "try: VIP = [] for dirname, dirnames, filenames in os.walk(folder): for i in", "1 self.save_run_name = \"ledock_{0}_sample_{1}\".format(self.run_type, sample_num) random_seed = np.random.randint(low_seed, high_seed) command_to_run = \"{0} {1}\".format(command_receptor,", "in interval [1.0-30.0] VARANG 5.0 # The variation in degrees for the anchor", ">>> >>> >>> # This is for Autodock vina >>> EPI_uber_dock.set_up_Vina_Box() >>> EPI_uber_dock.prepare_Vina_run()", "Reproduction model # Values in [BOOM,STEADY] REPMODEL BOOM # Fraction of population to", "self.run_type = 'g_mmpbsa' self.state_data = {} self.state_data_samples = {} self.g_mmpbsa_run_finished = False self.g_mmpbsa_sim_states", "self.x_size = self.state_data_samples['boxSettings']['size_x'] self.y_size = self.state_data_samples['boxSettings']['size_y'] self.z_size = self.state_data_samples['boxSettings']['size_z'] self.num_modes = self.state_data_samples['boxSettings']['numModes'] def", "# * Redistributions of source code must retain the above copyright # notice,", "TODO test self.sim_folder_run = self.state_data['simRunFolder'] # .split('/')[-1] self.directories = self.state_data['directory'] self.folder_exists = self.state_data['folderCreated']", "submitted_jobs_dask, finished_jobs, finished_jobs_dict): import copy # modified_submitted_jobs_dask = copy.deepcopy(submitted_jobs_dask) for i, job in", 
"custom_index=custom_index_curr) get_worker_free_temp = run_dask_tools.check_free_resources(worker_status_temp) custom_index_curr += 2 print('----------------TEST------------------') curr_item = queue_jobs[curr_index] test =", "uncomment the next line #EXCHET # Include water molecules in the target (always", ">>> run_type = 'vina_sample' >>> >>> >>> >>> receptor_file = EPI_folder + os.sep", "import run_dask_tools test = 1 curr_client = client worker_status = run_dask_tools.get_dask_worker_status(curr_client) get_worker_free =", "self.rdock_folder_name # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type) # self.directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.rdock_folder_name) # print('TADA", "self.json_state_file # filename = self.absolute_json_state_file # filedata = self.state_data self.save_state_data_json(filedata=self.before_dask, filename=self.absolute_json_state_file) # allow", "len(finished_jobs) max_jobus = max_jobs_to_run # g_mmpbsa part if curr_item_prog == 'g_mmpbsa': if workstation_freemem", "apolar = 'apolar_{0}.xvg'.format(i) contrib_mm = 'contrib_MM_{0}.dat'.format(i) contrib_pol = 'contrib_pol_{0}.dat'.format(i) contrib_apol = 'contrib_apol_{0}.dat'.format(i) temp_state[str(i)].update({'energyMM':energy_mm,", "+ os.sep + self.run_folder_name # Create folder don't forget # self.directories = self.find_sample_folders(self.folder_path,", "to select those that are not finished for pre_job in run_mmpbsa_queue: # print(pre_job)", "sample_num = results[key]['part_num'] results_dask = results[key]['dask'] original_data = self.state_data['energySoftware'][prog] abs_folder = self.g_mmpbsa_folder #", "= workstation_info_temp['preped'] workstation_address = workstation_preped_temp['workerAddress'] # This way folder is buggy workstation_dir =", "task = client.submit(run_dask_tools.upload_g_mmpbsa_files_dask, # big_future, # workers=[worker_address], # key='key_scatter_{0}'.format(worker_address), # retries=retries_num) # tasks_upload.append(task)", "outfile: # json.dump(self.cluster_models, outfile) # pickle.dump(self.cluster_models, open(filename, \"wb\")) # TODO create folder for", "1 time.sleep(10) test = 1 # ############################################################################################### # # # work_address = workstation1_preped['workerAddress']", "\\ \"--out {11}_out.pdbqt\".format(self.receptor_file, self.ligand_file, self.x_center, self.y_center, self.z_center, self.x_size, self.y_size, self.z_size, self.samples_exhaust, self.num_modes, self.save_run_name,", "prepare each separate rDock run command :param sample_num: :param pose_gen: default generate 20", "object loading of pdb and pbdqt file formats. 
Then converts to pandas dataframe.", "curr_item['Program'] ############################################################ # submitted_jobs_dask len less than 16 jobs_running = len(submitted_jobs_dask) - len(finished_jobs)", "self.folder_exists = self.state_data_samples['folderCreated'] self.x_center = self.state_data_samples['boxSettings']['center_x'] self.y_center = self.state_data_samples['boxSettings']['center_y'] self.z_center = self.state_data_samples['boxSettings']['center_z'] self.x_size", "{1} \" \\ \"--center_x {2} \" \\ \"--center_y {3} \" \\ \"--center_z {4}", "waitTime=15) >>> >>> >>> # This is for Autodock vina >>> EPI_uber_dock.set_up_Vina_Box() >>>", "thought out #################################################################################################################### def prepare_samples_collection_run(self, standard_exhaust=128, num_samples_run=100, run_type='samples_run'): if self.setup_box is False: print('Please", "self.state_data['energySoftware']['g_mmpbsa'].update({'firstIndex': self.first_index}) self.state_data['energySoftware']['g_mmpbsa'].update({'secondIndex': self.second_index}) self.state_data['energySoftware']['g_mmpbsa'].update({'indexFile': self.index_file}) self.state_data['energySoftware']['g_mmpbsa'].update({'mdpFile': self.mdp_file}) self.state_data['energySoftware']['g_mmpbsa'].update({'tprFile': self.tpr_file}) self.save_state_data_json() test =", "the documentation and/or other materials provided # with the distribution. # * Neither", "prog = results[key]['Program'] # need [0] key sample_num = results[key]['part_num'] if prog ==", "0 else: curr_index += 1 curr_worker += 1 time.sleep(10) test = 1 #", "'w') out_file.write(out_mem) out_file.close() out_name = abs_folder + os.sep + results_dask['contribMM_filename'] out_mem = results_dask['contribMM_mem']", "get_uber_g_mmpbsa_run_folder_name(self): curr_folder = os.getcwd() return curr_folder + os.sep + self.run_folder_name def prepare_g_mmpbsa_dask_protocol(self, dask_client=None,", "= self.state_data self.save_state_data_json(filedata=self.before_dask, filename=self.absolute_json_state_file) # allow CPU to cool down # self.hold_nSec(5) print('This", "EXCHET is disabled # To include water molecules, uncomment the next line #INCHOH", "= copy.deepcopy(self.state_data) ################################################################################ if self.g_mmpbsa_prepared is True: full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa'] test = 1", "OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS", "# <!-------------------------------------------------------------------------- # # Copyright (c) 2016-2019,<NAME>. # All rights reserved. 
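The fragments call `run_dask_tools.get_dask_worker_status()` and `check_free_resources()` before placing jobs on workers. Those helpers are not shown in the dump; a simplified stand-in using only the public `distributed` API sketches the same idea of surveying per-worker capacity:

```python
from dask.distributed import Client, LocalCluster

def check_free_resources(client):
    """Rough analogue of the worker-status fragments: report per-worker
    thread counts and memory limits from the scheduler. The original
    helpers live in run_dask_tools; this is a simplified stand-in."""
    info = client.scheduler_info()
    report = {}
    for addr, worker in info["workers"].items():
        report[addr] = {"workerAddress": addr,
                        "nthreads": worker["nthreads"],
                        "memory_limit": worker["memory_limit"]}
    return report

if __name__ == "__main__":
    with Client(LocalCluster(n_workers=2, threads_per_worker=2)) as client:
        for addr, stats in check_free_resources(client).items():
            print(addr, stats)
```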
# Redistribution", "# Permeability for side-chain rotamer acceptance # Float in interval [0.0,1.0] from fully", "penalty # When the value is 0.0 the solvent interactions are derived from", "jobs state print('-------') if curr_index == 0 and len(submitted_jobs_dask) == 1: curr_index =", "-1\\n\\n'.format(self.flexaid_res_chain) # Rotational DOF of the ligand (0) optimz2 = 'OPTIMZ 9999 {0}", ">>> EPI_uber_dock.prep_rDock_dock_run_commands() >>> EPI_uber_dock.run_rDock_simulation(parallel=True, waitTime=15) >>> >>> #This is for FlexAid >>> EPI_uber_dock.prepare_flexaid_settings()", "self.sim_folder_run_samples, 'directory': self.directories_samples, 'setup': self.setup_box, 'folderCreated': self.folder_exists_samples, 'simStates': {}}) self.prepVinaSim_samples() self.save_state_data_json(filedata=self.state_data_samples, filename=self.json_samples_state_file) self.load_state_called_samples", "has beeen loaded \\n') ############################################################################## def flexaid_generate_ga_dat_parameters(self): ''' Generate GA dat parameters for", ">>> EPI_uber_dock.set_up_Vina_Box() >>> EPI_uber_dock.prepare_Vina_run() >>> EPI_uber_dock.prepVinaSim_uberDock() >>> EPI_uber_dock.runVinaSim_uber() Molecule object loading of pdb", "= 'traj_part{0}.xtc'.format(i) abs_file_save = folder_to_save + os.sep + file_save file_save_list.append(file_save) abs_file_save_list.append(abs_file_save) temp_state[str(i)].update({'runFinished':False, 'index':i,", "curr_worker_id) print('Passed running ', run_name) # submitted_jobs_dask_temp, finished_jobs_temp = self.check_dask_jobs(submitted_jobs_dask,finished_jobs) finished_jobs, finished_jobs_dict =", "', i) except Exception as error: print('error is ', error) # print('i is", "# # self.save_state_data_json() # TODO this part needs to be thought out ####################################################################################################################", "contrib_pol = 'contrib_pol_{0}.dat'.format(i) contrib_apol = 'contrib_apol_{0}.dat'.format(i) temp_state[str(i)].update({'energyMM':energy_mm, 'polar':polar, 'apolar':apolar, 'contrib_MM':contrib_mm, 'contrib_pol':contrib_pol, 'contrib_apol':contrib_apol}) temp_traj.save(abs_file_save)", "about seed self.save_run_name = 'vina_' + self.run_type_samples + '_' + str(sample_num) command_to_run =", "EPI_uber_dock.prepare_flexaid_settings() >>> EPI_uber_dock.process_flexaid_ligand() >>> EPI_uber_dock.get_flexaid_clefts() >>> EPI_uber_dock.flexaid_generate_ga_dat_parameters() >>> EPI_uber_dock.flexaid_generate_config_input() >>> EPI_uber_dock.prep_FlexAid_dock_run_commands() >>> EPI_uber_dock.run_FlexAid_simulation(parallel=True,", "needs further refinement # # # break # # test = 1 #", "= 1 # # # Try to load initial LeDock try: self.mdtraj_frames =", "= self.ledock_data['LeDock_params']['xmax'] # self.ledock_ymin = self.ledock_data['LeDock_params']['ymin'] # self.ledock_ymax = self.ledock_data['LeDock_params']['ymax'] # self.ledock_zmin =", "copy original_get_worker_free = copy.deepcopy(get_worker_free) # TEST IT WORKS # queue_jobs = self.run_mmpbsa_dask #", "print(pre_job) if pre_job['runFinished'] is False: final_queue_job.append(pre_job) test = 1 self.run_mmpbsa_dask = final_queue_job #", "'contrib_pol':contrib_pol, 'contrib_apol':contrib_apol}) temp_traj.save(abs_file_save) temp_state[str(i)].update({'fileSaved': True }) simStates['simStates'].update(temp_state) self.mdtraj_frames = select_frames self.mdtraj_sliced = temp_mdtraj", "+ '_' + self.molecule_name + '_' + 'LeDock' 
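The FlexAid fragments assemble a CONFIG input by string formatting: `OPTIMZ 9999 <chain> -1` for the ligand's translational DOF, `OPTIMZ 9999 <chain> 0` for its rotational DOF, then one `OPTIMZ` line per flexible-dihedral index parsed from the ligand's FLEDIH entries. A compact sketch of that assembly; the column semantics follow the comments in the fragments, so treat them as an assumption:

```python
def build_flexaid_optimz_section(ligand_number, chain, flex_indexes):
    # Translational (-1) and rotational (0) DOF lines for the ligand,
    # which the fragments address as residue number 9999 on chain A.
    lines = ["OPTIMZ {0} {1} -1".format(ligand_number, chain),
             "OPTIMZ {0} {1} 0".format(ligand_number, chain)]
    # One line per flexible dihedral index taken from FLEDIH entries.
    lines += ["OPTIMZ {0} {1} {2}".format(ligand_number, chain, idx)
              for idx in flex_indexes]
    return "\n".join(lines) + "\n"

print(build_flexaid_optimz_section(9999, "A", [1, 2, 3, 4, 5]))
```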
self.ledock_absolute_folder_name = self.uber_dock_folder + os.sep", ">>> >>> >>> EPI_uber_dock = uber_docker.UberDockerObject(receptor_file, ligand_file, '.', molname=molname, receptor_name=receptor_name) >>> >>> >>>", "interval [1,N] where N is NUMCHROM PRINTCHR 10 ''' self.generate_ga_dat_pameters = True self.generate_ga_dat", "in degrees for the anchor angle of the ligand # Float in interval", "self.ligand_flexaid_initials) final_str += rmsdst final_str += flexaid_config_input_template generate_config_input_file = 'CONFIG_' + self.receptor_name +", "Redistribution and use in source and binary forms, with or without # modification,", "# 'GA_DataName': self.generate_ga_dat, # 'GA_DATA_Abs': self.generate_ga_dat_name_abs, # 'GA_ParamFull': self.flexaid_ga_dat_param_template}) # # # self.state_data_samples", "self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type) self.ledock_directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.ledock_folder_name) print('TADA ', self.ledock_directories) test =", "self.trajectory_mdtraj_topology_dataframe = self.trajectory_mdtraj.topology.to_dataframe() self.objects_loaded = True def get_uber_g_mmpbsa_run_folder_name(self): curr_folder = os.getcwd() return curr_folder", "= 1 ################################################################################################### test = 1 #################################################################################################### self.run_mmpbsa_dask = run_g_mmpbsa curr_LeDock = 0", "self.trajectory_file, 'mdpFile': self.mdp_file, 'tprFile': self.tpr_file, 'indexFile': self.index_file, 'runFolderName': self.run_folder_name, 'folderPath': self.folder_path, 'jsonStates': self.json_state_file,", "''' Prepare ultraDock folder and initial json configuration >>> EPI_uber_dock.prepare_rdock_settings() Convert with pybel", "dir_name=self.run_type) self.directories = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name) print('TADA ', self.directories) self.json_state_file = self.sim_folder_run + os.sep", "cores # TODO article Pagadala Software for molecular docking: a review # This", "groups in the target (water,metal,modified amino acids,cofactors,ligands) # To exclude these groups, uncomment", "in interval [1-N] NUMGENER 500 # Use Adaptive Genetic-Algorithm # Value of 0", "= 'CONFIG_' + self.receptor_name + '-' + self.ligand_name + '.inp' return generate_config_input_file, final_str", "self.hold_nSec(5) print('This success ---> ', i) except Exception as error: print('error is ',", "self.state_data['dockSoftware']['FlexAid']['GA_params'].update( {'generateGA_param': self.generate_ga_dat_pameters, 'GA_DataName': self.generate_ga_dat, 'GA_DATA_Abs': self.generate_ga_dat_name_abs, 'GA_ParamFull': self.flexaid_ga_dat_param_template}) # self.state_data_samples = self.state_data.copy()", "1 self.receptor_ledock_pdb = \"{0}.pdb\".format(self.receptor_name) self.ligand_ledock_mol2 = \"{0}.mol2\".format(self.ligand_name) self.absolute_receptor_ledock_pdb = self.ledock_absolute_folder_name + os.sep +", "def prepare_for_dask_cluster(self, LeDock=2, rDock=2, FlexAid=2, Vina=2, parallel=False): ''' run uber dock protocol for", "BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF", "ligand_clear_dok = sample_data['ligand_clear_name'] + '.dok' # -spli MOR_flexaid.dok command_to_clean = \"{0} -spli {1}\".format(command_receptor,", "json # with open(filename, 'w') as outfile: # json.dump(self.cluster_models, outfile) # 
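The LeDock fragments here shell out to the bundled binaries: `lepro_linux_x86` to prepare the receptor PDB, `ledock_linux_x86` with a parameter file to dock, and a `-spli` invocation to split the multi-pose `.dok` output (the fragments reuse the variable `command_receptor` for that last call, so which binary takes `-spli` is inferred, not confirmed). A sketch that only composes the command strings, since the binaries are external:

```python
import os

def build_ledock_commands(ledock_dir, receptor_pdb, dock_param_file, dok_file):
    lepro = os.path.join(ledock_dir, "lepro_linux_x86")
    ledock = os.path.join(ledock_dir, "ledock_linux_x86")
    prepare_cmd = "{0} {1}".format(lepro, receptor_pdb)   # receptor prep
    dock_cmd = "{0} {1}".format(ledock, dock_param_file)  # run docking
    split_cmd = "{0} -spli {1}".format(ledock, dok_file)  # split .dok poses
    return prepare_cmd, dock_cmd, split_cmd

# Placeholder paths; run each string with subprocess.run(cmd, shell=True)
for cmd in build_ledock_commands("/opt/ledock", "LasR_flexaid.pdb",
                                 "dock.in", "EPI.dok"):
    print(cmd)
```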
pickle.dump(self.cluster_models, open(filename,", "sample run command prep finished\") else: print('Please setup vina box settings') # except", "Prepare ultraDock folder and initial json configuration >>> EPI_uber_dock.prepare_rdock_settings() Convert with pybel to", "hold information about run states self.g_mmpbsa_folder = self.get_uber_g_mmpbsa_run_folder_name() self.absolute_json_state_file = self.g_mmpbsa_folder + os.sep", "helper as hlp # matplotlib.style.use('ggplot') sns.set(style=\"darkgrid\") low_seed = 1 high_seed = 999999999 mgltools_utilities", ">>> # LasR_MOR_mmpbsa_calc.run_dask_docking(client) Notes ----- See examples/cluster/plot_dbscan.py for an example. This implementation bulk-computes", "run_dask_tools.get_dask_worker_status(curr_client) get_worker_free = run_dask_tools.check_free_resources(worker_status) import copy original_get_worker_free = copy.deepcopy(get_worker_free) # TEST IT WORKS", "dihedrals of the ligand # Float in interval [1.0-30.0] VARFLX 10.0 # Use", "+ self.receptor_ledock_pdb self.absolute_ligand_ledock_mol2 = self.ledock_absolute_folder_name + os.sep + self.ligand_ledock_mol2 self.receptor_pybel.write(\"pdb\", self.absolute_receptor_ledock_pdb, overwrite=True) self.ligand_pybel.write(\"mol2\",", "tpr_file self.mdp_file = mdp_file self.index_file = index_file self.first_index = first_index self.second_index = second_index", "len(self.directories_samples) == 0: print('Creating folder for vina samples run\\n') print('Vina run type: {0}'.format(self.run_type_samples))", "EPI_uber_dock.runVinaSim_uber() Molecule object loading of pdb and pbdqt file formats. Then converts to", "notice, this list of conditions and the following disclaimer. # * Redistributions in", "Intragenic crossovers are possible INTRAGEN # Specifies that the initial population is generated", "else: curr_index += 1 curr_worker += 1 time.sleep(10) test = 1 # ###############################################################################################", "lepro # ./lepro_linux_x86 LasR_flexaid.pdb os.chdir(self.ledock_absolute_folder_name) command_receptor = self.ledock_path + os.sep + 'lepro_linux_x86' +", "# self.state_data_samples = self.state_data.copy() self.save_state_data_json() # TODO this part needs to be thought", "{'generateGA_param': self.generate_ga_dat_pameters, # 'GA_DataName': self.generate_ga_dat, # 'GA_DATA_Abs': self.generate_ga_dat_name_abs, # 'GA_ParamFull': self.flexaid_ga_dat_param_template}) # self.state_data_samples", "number 9999 and chain A # Translational DOF of the ligand (-1) optimz1", "[0.0,1.0] # Only considered when ADAPTVGA is 0 CROSRATE 0.90 # Constant mutation", "16 # Production # worker_status = run_dask_tools.get_dask_worker_status(curr_client) # # get_worker_free = run_dask_tools.check_free_resources(worker_status) #", "curr_folder = os.getcwd() return curr_folder + os.sep + self.run_folder_name def get_samples_run_folder_name(self): curr_folder =", "i in flexible_bonds_data_text_list: if 'FLEDIH' in i: print(i) temp = i.split(' ') print(temp)", "Use Vcontacts indexing VINDEX # Vcontacts plane definition # Value in [B,R,X] for", "id in enumerate(get_worker_free): worker_ids.update({i: id}) custom_index_curr = 3 while len(queue_jobs) > 0: if", "%s secs have pass' % (n)) @hlp.timeit def prepVinaSampleCommand(self, sample_num): # try: if", "utf-8 -*- # !/usr/bin/env python # # @file __init__.py # @brief G_MMPBSA DASK", ":param num_samples: test value 6 :return: ''' try: self.g_mmpbsa_sim_states = self.state_data['dockSoftware']['LeDock']['simStates'] 
self.ledock_samples =", "job_quantity = len(queue_jobs) finished_jobs = [] finished_jobs_dict = {} worker_status_free = None test", "pop_item.update({'workingDir':workstation_dir}) submitted_jobs.append(pop_item) # MAYBE CHECK FOLDER HERE # #big_future = client.scatter(pop_item, workers=[workstation_address], hash=False)", "interval [1.0-30.0] VARANG 5.0 # The variation in degrees for the anchor dihedral", "the grid spacing of the binding-site # Float in interval [0.1,1.0] SPACER 0.375", "Float in interval [1.0-30.0] VARDIH 5.0 # The variation in degrees for flexible", "optimz2 = 'OPTIMZ 9999 {0} 0\\n\\n'.format(self.flexaid_res_chain) # Add one extra line for each", "binding-site # Float in interval [0.1,1.0] SPACER 0.375 # Exclude hetero groups in", "self.absolute_path = os.path.abspath(filename) self.load_state_called_samples = True print(os.path.abspath(__file__)) self.state_data_samples = json.load(open(filename, \"r\")) # os.chdir('HSL_exhaustiveness')", "0.375 # Exclude hetero groups in the target (water,metal,modified amino acids,cofactors,ligands) # To", "is 0.0 the solvent interactions are derived from the interaction matrix # Float", "{'receptor_pdb': self.receptor_ledock_pdb, 'ligand_mol2': self.ligand_ledock_mol2, 'lepro_pdb': self.lepro_pdb_file, 'lepro_abs_pdb': self.ledock_absolute_folder_name + os.sep + self.lepro_pdb_file, 'abs_receptor_pdb':", "# retries=retries_num) # tasks_upload.append(task) # print(\"Starting uploading to \", worker_address) test = 1", "'w') # self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template) # self.generate_ga_dat_object_file.close() # # self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}}) # # self.state_data['dockSoftware']['FlexAid']['GA_params'].update( #", "# LasR_MOR_mmpbsa_calc.run_dask_docking(client) Notes ----- See examples/cluster/plot_dbscan.py for an example. This implementation bulk-computes all", "os.sep + self.receptor_name + '_' + self.molecule_name + '.json' if filename is None", "pose_gen: default generate 20 poses :return: ''' try: if self.setup_ledock_pameters is not False:", "self.load_state_file = load_state_file if load_state_file is not None: self.load_state_data_json(self.load_state_file) else: print('G_MMPBSA Object has", "# except: # print('LeDock_params is empty verify yolo') # # try: # self.LeDock_sim_states", "= self.state_data_samples['simRunFolder'] # .split('/')[-1] self.directories_samples = self.state_data_samples['directory'] self.setup_box = self.state_data_samples['setup'] self.folder_exists = self.state_data_samples['folderCreated']", "= load_state_file if load_state_file is not None: self.load_state_data_json(self.load_state_file) else: print('G_MMPBSA Object has been", "of the ligand # The allowable flexible bonds are listed as FLEDIH lines", "# allow CPU to cool down # self.hold_nSec(5) print('This success ---> ', i)", "= [] # big_future = client.scatter(self.dask_prep, broadcast=True) # for worker in get_worker_free: #", "work_address = workstation1_preped['workerAddress'] # # # # # This is to run on", "neighborhood. 
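Just after this point the fragments pop a queued job, attach the chosen worker's directory, and ship it with `client.scatter(pop_item, workers=[workstation_address], hash=False)` followed by a pinned `client.submit(..., workers=[...], key=run_name, retries=...)`. A self-contained miniature of that pattern, with a trivial stub in place of `run_dask_tools.run_gmmpbsa_using_dask`:

```python
from dask.distributed import Client, LocalCluster

def run_gmmpbsa_stub(payload):
    """Stand-in for run_dask_tools.run_gmmpbsa_using_dask; just tags the
    payload as finished so the round trip is observable."""
    payload["runFinished"] = True
    return payload

if __name__ == "__main__":
    with Client(LocalCluster(n_workers=2, threads_per_worker=1)) as client:
        target = list(client.scheduler_info()["workers"])[0]
        pop_item = {"save_run_name": "g_mmpbsa_part_0"}
        # As in the fragments: pre-scatter the payload to the chosen
        # worker, then pin the task there under a stable key.
        big_future = client.scatter(pop_item, workers=[target], hash=False)
        task = client.submit(run_gmmpbsa_stub, big_future,
                             workers=[target],
                             key="key_{0}_0".format(pop_item["save_run_name"]),
                             retries=2)
        print(task.result())
```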
>>> LasR_MOR_mmpbsa_calc = g_mmpbsa_dask.GMMPBSAObject(traj, topol_file, tpr_file, mdp_file, index_file, first_index, second_index, molname, receptor_name)", "allow CPU to cool down # self.hold_nSec(5) print('This success ---> ', i) except", "self.save_state_data_json() # TODO this part needs to be thought out #################################################################################################################### def flexaid_generate_ga_dat_parameters_dask(self):", "# Divide trajectory to number of free cores # TODO article Pagadala Software", "'folderCreated': self.folder_exists, 'simStates': {}}) self.state_data.update(programs_dict) # self.prepVinaSim_exhaust() self.save_state_data_json() self.load_state_called = False else: self.load_state_file", "original_get_worker_free = copy.deepcopy(get_worker_free) # TEST IT WORKS # queue_jobs = self.run_mmpbsa_dask # job_test", "{6} \" \\ \"--size_z {7} \" \\ \"--exhaustiveness {8} \" \\ \"--num_modes {9}", "# test = 1 # run_docking_queue += run_docking_rDock[curr_rDock:curr_rDock + rDock] # curr_rDock +=", "with pybel to mol2 for receptor and sd for ligand :return: ''' #", "by parsing pdb or pdbqt file. 2 types of parsers can be used:", "pop_item = queue_jobs.pop(curr_index) key_name = pop_item['save_run_name'] run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id) print('Cur run ',", "self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template) # self.generate_ga_dat_object_file.close() # # self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}}) # # self.state_data['dockSoftware']['FlexAid']['GA_params'].update( # {'generateGA_param': self.generate_ga_dat_pameters,", "range(1, n + 1): print(i) time.sleep(1) # Delay for 1 sec print('Ok %s", "try: dir_names = [] for dirname, dirnames, filenames in os.walk(folder_path): # print(dirname, '-')", "finished_jobs, finished_jobs_dict) time.sleep(60) print('->' * 10) print('Everything is finished :))))))') print('---' * 10)", "of information or not? 
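The doctest-style fragments (`>>> LasR_MOR_mmpbsa_calc = g_mmpbsa_dask.GMMPBSAObject(...)`) outline the intended call order for the driver object. Consolidated into one snippet below; the `from molmolpy...` import path, the scheduler address, and all file paths are assumptions, and the final call stays commented out exactly as the fragments leave it:

```python
from dask.distributed import Client

# Assumed import location for the class the fragments construct as
# g_mmpbsa_dask.GMMPBSAObject(...); adjust to wherever the module lives.
from molmolpy.tools import g_mmpbsa_dask

calc = g_mmpbsa_dask.GMMPBSAObject(
    "traj.xtc", "topol.pdb", "md.tpr", "pb.mdp", "index.ndx",
    first_index=1, second_index=13,
    molname="MOR", receptor_name="LasR")

client = Client("tcp://scheduler-host:8786")  # existing dask scheduler
calc.prepare_g_mmpbsa_dask_protocol(client)
calc.prepare_for_dask_cluster(parallel=True)
# calc.run_dask_gmmpbsa(client, max_jobs_to_run=10)
```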
def load_state_data_json(self, filename): ''' :param filename: load json state", ":return: ''' self.trajectory_mdtraj = md.load_xtc(self.trajectory_file, top=self.topology_file) self.trajectory_mdtraj_topology = self.trajectory_mdtraj.topology self.trajectory_mdtraj_topology_dataframe = self.trajectory_mdtraj.topology.to_dataframe() self.objects_loaded", "False self.g_mmpbsa_prepared = False # This part needs clarification self.prep_mdtraj_object() # original data", "list(results.keys())[0] prog = results[key]['Program'] # need [0] key sample_num = results[key]['part_num'] if prog", "#This is for FlexAid >>> EPI_uber_dock.prepare_flexaid_settings() >>> EPI_uber_dock.process_flexaid_ligand() >>> EPI_uber_dock.get_flexaid_clefts() >>> EPI_uber_dock.flexaid_generate_ga_dat_parameters() >>>", "'_' + self.molecule_name + '_' + self.run_type + '.json' if len(self.directories) == 0:", "'''# Number of chromosomes (number individuals in the population) # Integer in interval", "else: print('G_MMPBSA Object has been created') self.trajectory_file = traj self.topology_file = topol self.tpr_file", "update index # print(curr_item) # How to save submitted jobs state print('-------') if", "# Constant crossover probability # Float in interval [0.0,1.0] # Only considered when", "self.generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat' self.generate_ga_dat_name_abs =", "index_mem = index_file.read() index_filename = index_abs.split(os.sep)[-1] # data_pre = self.state_data['energySoftware']['g_mmpbsa'] # data_pre.update({'dask': {}})", "= open(self.generate_ga_dat_name_abs, 'w') self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template) self.generate_ga_dat_object_file.close() self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}}) self.state_data['dockSoftware']['FlexAid']['GA_params'].update( {'generateGA_param': self.generate_ga_dat_pameters, 'GA_DataName': self.generate_ga_dat, 'GA_DATA_Abs':", "self.ledock_param_title = self.ledock_data['LeDock_params']['title'] # self.rdock_title = self.ledock_data['LeDock_params']['title'] # # self.receptor_file_ledock = self.ledock_data['LeDock_params']['receptorFile'] #", "empty verify yolo') # # test = 1 # # try: # self.setup_ledock_pameters", "ligand_file = EPI_folder + os.sep + 'EPI.pdbqt' >>> molname = 'EPI' >>> receptor_name", "# g_mmpbsa part if curr_item_prog == 'g_mmpbsa': if workstation_freemem > gmmbpsa_min_mem and jobs_running", "False # This part needs clarification self.prep_mdtraj_object() # original data before transformation #", "# data_pre.update({'dask': {}}) data_pre = {} data_pre.update({'tprName':tpr_filename, 'tprMem':tpr_mem}) data_pre.update({'mdpName':mdp_filename, 'mdpMem':mdp_mem}) data_pre.update({'indexName':index_filename, 'indexMem':index_mem}) self.dask_prep", "loading of pdb and pbdqt file formats. Then converts to pandas dataframe. 
Create", "= 'OPTIMZ {0} {1} {2}\\n'.format(self.flexaid_res_number, self.flexaid_res_chain, flex_index) flexible_index_list_phrases.append(temp_line) test = 1 final_str +=", "self.prepVinaSim_samples() self.save_state_data_json(filedata=self.state_data_samples, filename=self.json_samples_state_file) self.load_state_called_samples = False self.prep_sample_run = True else: self.load_state_file_samples = self.json_samples_state_file", "'gold', 'darkorange']) import multiprocessing import mdtraj as md from molmolpy.utils.cluster_quality import * from", "is empty verify yolo') # # test = 1 # # try: #", "#################################################################################################### self.run_mmpbsa_dask = run_g_mmpbsa curr_LeDock = 0 # very slow # while len(run_docking_queue)", "= client.scatter(pop_item, workers=[workstation_address], hash=False) big_future = pop_item task_g_mmpbsa = client.submit(run_dask_tools.run_gmmpbsa_using_dask, big_future, workers=[workstation_address], key=run_name,", "+ '_' + self.molecule_name + '_' + self.run_type self.sim_folder_run = self.folder_path + os.sep", "source code must retain the above copyright # notice, this list of conditions", "abs_folder + os.sep + results_dask['energyMM_filename'] out_mem = results_dask['energyMM_mem'] out_file = open(out_name, 'w') out_file.write(out_mem)", "out_name = abs_folder + os.sep + results_dask['polar_filename'] out_mem = results_dask['polar_mem'] out_file = open(out_name,", "worker in get_worker_free: # worker_info = get_worker_free[worker] # worker_address = worker_info['preped']['workerAddress'] # #", "CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR", ":return: ''' current_pid = multiprocessing.current_process().pid print(\"Main Process with PID:{}\".format(current_pid)) # free_threads_for_Vina = num_threads", ">>> >>> LasR_MOR_mmpbsa_calc.prepare_g_mmpbsa_dask_protocol(client) >>> >>> >>> LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True) >>> # >>> # LasR_MOR_mmpbsa_calc.run_dask_docking(client) Notes", "def __init__(self, traj, topol, tpr_file, mdp_file, index_file, first_index, second_index, molname='Unknown', receptor_name='Unknown', folder_path='.', job_name", "= self.state_data_samples['samplesList'] self.folder_path = self.state_data_samples['folderPath'] self.run_type = self.state_data_samples['runType'] self.molecule_name = self.state_data_samples['molName'] self.receptor_name =", "self.state_data_samples['folderCreated'] self.x_center = self.state_data_samples['boxSettings']['center_x'] self.y_center = self.state_data_samples['boxSettings']['center_y'] self.z_center = self.state_data_samples['boxSettings']['center_z'] self.x_size = self.state_data_samples['boxSettings']['size_x']", "workstation_preped_temp = workstation_info_temp['preped'] workstation_address = workstation_preped_temp['workerAddress'] # This way folder is buggy workstation_dir", "__init__.py # @brief G_MMPBSA DASK PROJECT # @author <NAME> # # <!-------------------------------------------------------------------------- #", "get_worker_free = run_dask_tools.check_free_resources(worker_status) import copy original_get_worker_free = copy.deepcopy(get_worker_free) # TEST IT WORKS #", "{4} \" \\ \"--size_x {5} \" \\ \"--size_y {6} \" \\ \"--size_z {7}", "if workstation_freemem > gmmbpsa_min_mem and jobs_running <max_jobus: print('Submit MMPBSA job to DASK') pop_item", "might need to get modified def find_sample_files(self, folder): try: VIP = [] for", "import copy 
original_get_worker_free = copy.deepcopy(get_worker_free) # TEST IT WORKS # queue_jobs = self.run_mmpbsa_dask", "indent=4) # TODO should I add json saving of information or not? def", "self.ledock_directories) test = 1 # This will hold information about run states #", "flexible_index_list_phrases.append(temp_line) test = 1 final_str += pdbnam final_str += inplig final_str += rgnopt_locclf", "IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF", "task = client.submit(run_dask_tools.run_vina_using_dask, # # data, # # workers=[work_address], # # key='key_test', #", "1 # run_docking_queue += run_docking_rDock[curr_rDock:curr_rDock + rDock] # curr_rDock += rDock # #", "crossovers are possible INTRAGEN # Specifies that the initial population is generated randomly", "{}}) self.state_data['dockSoftware']['FlexAid']['GA_params'].update( {'generateGA_param': self.generate_ga_dat_pameters, 'GA_DataName': self.generate_ga_dat, 'GA_DATA_Abs': self.generate_ga_dat_name_abs, 'GA_ParamFull': self.flexaid_ga_dat_param_template}) # self.state_data_samples =", "for each flexible bond of the ligand # The allowable flexible bonds are", "Sample runSim: \", e) # sys.exit(0) def get_molecule_name(self): return self.molecule_name def get_receptor_name(self): return", "self.run_type_samples + '.json' # This will hold information about run states if len(self.directories_samples)", "pdb and pbdqt file formats. Then converts to pandas dataframe. Create MoleculeObject by", "TODO need to think about seed self.save_run_name = 'vina_' + self.run_type_samples + '_'", "def get_samples_run_folder_name(self): curr_folder = os.getcwd() print(\"Yippie yi kay\", curr_folder) return curr_folder + os.sep", "of any # contributors may be used to endorse or promote products derived", "worker_address) test = 1 # TODO # This part runs the main program", "1 # print('Last Check of submitted jobs') while len(finished_jobs) != job_quantity: finished_jobs, finished_jobs_dict", "2.0 # Number of results/docking poses to output MAXRES 20 # Only output", "= json.load(open(filename, \"r\")) # os.chdir('HSL_exhaustiveness') self.receptor_file = self.state_data_samples['receptorFile'] self.ligand_file = self.state_data_samples['ligandFile'] self.exhaustiveness =", "self.find_sample_folders(self.folder_path, dir_name=self.run_type) self.directories_samples = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name_samples) print('TADA ', self.directories_samples) self.json_samples_state_file = self.sim_folder_run_samples +", "data['dask'].update({'trajMem':traj_mem, 'trajName':traj_filename}) data['dask'].update({'tprName': tpr_filename, 'tprMem': tpr_mem}) data['dask'].update({'mdpName': mdp_filename, 'mdpMem': mdp_mem}) data['dask'].update({'indexName': index_filename, 'indexMem':", "Need to select those that are not finished for pre_job in run_mmpbsa_queue: #", "= self.state_data['energySoftware']['g_mmpbsa']['absFileList'] self.simStates = self.state_data['energySoftware']['g_mmpbsa']['simStates'] test = 1 self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare'] # self.state_data['energySoftware']['g_mmpbsa'].update({'frames':", "not None: self.load_state_data_json(self.load_state_file) else: print('G_MMPBSA Object has been created') self.trajectory_file = traj self.topology_file", "./lepro_linux_x86 LasR_flexaid.pdb os.chdir(self.ledock_absolute_folder_name) command_receptor = self.ledock_path + os.sep + 'lepro_linux_x86' + ' {0}", "Prepare rdock run commands and save to json :param num_samples: test 
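`check_dask_jobs` in the fragments walks the submitted futures, skips ones already flagged in `finished_jobs_dict`, and appends newly completed ones; the caller then loops with `time.sleep(60)` until every job is accounted for. A runnable miniature of that polling pattern, with trivial squared-number tasks standing in for the docking/MMPBSA payloads:

```python
import time
from dask.distributed import Client, LocalCluster

def check_jobs(futures, finished, finished_flags):
    """One polling pass, in the spirit of check_dask_jobs: collect newly
    finished futures and flag them so they are not re-counted later."""
    for i, fut in enumerate(futures):
        if finished_flags.get(i):
            continue
        if fut.done():
            finished.append(fut)
            finished_flags[i] = True
    return finished, finished_flags

if __name__ == "__main__":
    with Client(LocalCluster(n_workers=2, threads_per_worker=1)) as client:
        futures = [client.submit(pow, n, 2, pure=False) for n in range(4)]
        finished, flags = [], {}
        while len(finished) != len(futures):
            finished, flags = check_jobs(futures, finished, flags)
            time.sleep(1)  # the fragments sleep 60 s between passes
        print(sorted(f.result() for f in finished))
```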
value 6", "+ '_' + self.ligand_name + '_LeDock Parameter file' self.ledock_rmsd = 0.5 self.set_up_ledock_dock_blind_parameters(title=self.ledock_title, receptor_file=self.lepro_pdb_file,", "self.generate_ga_dat return [generate_ga_dat, ] # self.generate_ga_dat_object_file = open(self.generate_ga_dat_name_abs, 'w') # self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template) # self.generate_ga_dat_object_file.close()", "self.run_folder_name_samples = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples self.sim_folder_run_samples =", "filedata=None, filename=None): ''' :param filename: Saves state file :return: ''' # import json", "for i,traj in zip(select_indexes,select_frames): temp_state = {str(i):{}} temp_traj = self.trajectory_mdtraj[traj:traj+div_traj] temp_mdtraj.append(temp_traj) temp_mdtraj_indexes.append(i) file_save", "IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN", "References ---------- \"\"\" def __init__(self, traj, topol, tpr_file, mdp_file, index_file, first_index, second_index, molname='Unknown',", "run commands and save to json :param num_samples: test value 6 :return: '''", "ledock_linux_x86 dock. in command_receptor = self.ledock_path + os.sep + 'ledock_linux_x86' sample_data = self.ledock_input_info[str(sample_num)]", "# curr_Vina += Vina # # test = 1 # test = 1", "is ', error) # print('i is ', i) print('Finished checking dask submissions ---\\n')", "5.0 SHARESCL 10.0 # Reproduction model # Values in [BOOM,STEADY] REPMODEL BOOM #", "finished_jobs = [] finished_jobs_dict = {} worker_status_free = None test = 1 #", "= 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat' generate_ga_dat_name_abs = self.flexaid_absolute_input_folder", "'OPTIMZ 9999 {0} -1\\n\\n'.format(self.flexaid_res_chain) # Rotational DOF of the ligand (0) optimz2 =", "+ '_' + self.run_type self.sim_folder_run = self.folder_path + os.sep + self.run_folder_name # Create", "generate LeDock commands') except: self.state_data['dockSoftware']['LeDock'].update({'LeDockSample_list': self.ledock_samples}) self.state_data['dockSoftware']['LeDock'].update(self.LeDock_sim_states) for sample_num in self.ledock_samples: self.prep_LeDock_dock_command(sample_num) print('Now", "in degrees for flexible dihedrals of the ligand # Float in interval [1.0-30.0]", "\"--exhaustiveness {8} \" \\ \"--num_modes {9} \" \\ \"--seed 10 \" \\ \"--log", "LeDock=2, rDock=2, FlexAid=2, Vina=2, parallel=False): ''' run uber dock protocol for LeDock, rDock,FlexAid,", "can be used: 1.molmolpy 2. pybel Stores molecule information in pandas dataframe as", "# # quit # sys.exit() print(\"Vina sample run command prep finished\") else: print('Please", "EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ------------------------------------------------------------------------ --> import", "# # # # TODO this works need to create a quiiee #", "\"\"\" Usage example >>> EPI_folder = '/media/Work/MEGA/Programming/StressHormones/dock_EPI' >>> EPI_samples = '/media/Work/MEGA/Programming/StressHormones/' >>> >>>", "self.receptor_name + '-' + self.ligand_name + '.inp' return generate_config_input_file, final_str # self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}})", "create a quiiee # # retries_num = 2 # # task = client.submit(run_dask_tools.run_vina_using_dask,", "run_dask_tools.check_free_resources(worker_status_temp) custom_index_curr += 2 print('----------------TEST------------------') curr_item = queue_jobs[curr_index] test = 1 curr_worker_id =", "the main program submitted_jobs = [] submitted_jobs_dask = [] queue_jobs = self.run_mmpbsa_dask job_quantity", "= [] submitted_jobs_dask = [] queue_jobs = self.run_mmpbsa_dask job_quantity = len(queue_jobs) finished_jobs =", "# # test = 1 # run_docking_queue += run_docking_rDock[curr_rDock:curr_rDock + rDock] # curr_rDock", "None test = 1 # maybe 2 async threads, one checks finished simulations,", "COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #", "of neighbors, while original DBSCAN had memory complexity O(n). Sparse neighborhoods can be", "# See McConkey et al. (2002) Bioinformatics. 18(10); 1365-1373 VCTPLA R # Use", "= {'simStates': {}} self.objects_loaded = False self.g_mmpbsa_prepared = False # This part needs", "self.y_size = self.state_data_samples['boxSettings']['size_y'] self.z_size = self.state_data_samples['boxSettings']['size_z'] self.num_modes = self.state_data_samples['boxSettings']['numModes'] def hold_nSec(self, n): for", "self.ligand_file, self.x_center, self.y_center, self.z_center, self.x_size, self.y_size, self.z_size, self.samples_exhaust, self.num_modes, self.save_run_name, self.save_run_name) print(command_to_run) self.command_samples_run_list.append(command_to_run)", "def set_molecule_name(self, mol_name): self.molecule_name = mol_name def set_receptor_name(self, receptor_name): self.receptor_name = receptor_name #", "Flexaid is very strict about spaces :return: ''' flexaid_config_input_template = '''# Optimization method", "files to all clients client.upload_file # task = client.submit(run_dask_tools.upload_g_mmpbsa_files_dask, # big_future, # workers=[worker_address],", "name of the molmolpy Developers nor the names of any # contributors may", "= mdp_file self.index_file = index_file self.first_index = first_index self.second_index = second_index self.prep_g_mmpbsa_run =", "client.submit(run_dask_tools.run_vina_using_dask, # # data, # # workers=[work_address], # # key='key_test', # # retries=retries_num)", "results = job.result() test = 1 try: key = list(results.keys())[0] prog = results[key]['Program']", "# Solvent term penalty # When the value is 0.0 the solvent interactions", "Number of generations # Integer in interval [1-N] NUMGENER 500 # Use Adaptive", "@hlp.timeit def check_dask_jobs(self, submitted_jobs_dask, finished_jobs, finished_jobs_dict): import copy # modified_submitted_jobs_dask = copy.deepcopy(submitted_jobs_dask) for", "print('Now continue for LeDock:D') self.save_state_data_json() test = 1 self.prep_LeDock_run = True @hlp.timeit def", "temp_mdtraj_indexes self.file_save_list = file_save_list self.abs_file_save_list = abs_file_save_list self.simStates = simStates test = 1", "EPI_uber_dock.run_FlexAid_simulation(parallel=True, waitTime=15) >>> >>> >>> 
# This is for Autodock vina >>> EPI_uber_dock.set_up_Vina_Box()", "+ os.sep + results_dask['energyMM_filename'] out_mem = results_dask['energyMM_mem'] out_file = open(out_name, 'w') out_file.write(out_mem) out_file.close()", "[0.0-1.0] DEECLA 0.8 # Use instances of side-chain conformers rather than using the", "self.prep_g_mmpbsa_run = False self.folder_exists = False # Running vina,whether it's for exhaustiveness or", "= self.ledock_data['num_samples'] # self.ledock_input_info = self.ledock_data['LeDockInputInfo'] # self.param_ledock_template = self.ledock_data['paramFull'] # except: #", "print('TADA ', self.ledock_directories) test = 1 # This will hold information about run", "of generations # Integer in interval [1-N] NUMGENER 500 # Use Adaptive Genetic-Algorithm", "Do not consider intramolecular interactions NOINTR # Side-chain rotamer acceptance threshold # Float", "molname='Unknown', receptor_name='Unknown', folder_path='.', job_name = 'Unknown', load_state_file=None): self.load_state_file = load_state_file if load_state_file is", "the average number of neighbors, while original DBSCAN had memory complexity O(n). Sparse", "HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT", "', path) self.flexaid_path = path def set_ledock_path(self, path): print('LeDock path is set to", "= self.check_dask_jobs(submitted_jobs_dask, finished_jobs, finished_jobs_dict) time.sleep(60) print('->' * 10) print('Everything is finished :))))))') print('---'", "data.update({'part_num': part_num}) data.update({'save_run_name': save_run_name}) data.update({'dask': {}}) traj_abs = data['absFileSave'] traj_file = open(traj_abs, 'rb')", "al. (2002) Bioinformatics. 18(10); 1365-1373 VCTPLA R # Use normalized surfaces in contacts", "# # # Try to load initial LeDock try: self.mdtraj_frames = self.state_data['energySoftware']['g_mmpbsa']['frames'] self.mdtraj_parts", "[] abs_file_save_list = [] simStates = {'simStates':{}} for i,traj in zip(select_indexes,select_frames): temp_state =", "len(queue_jobs) > 0: if curr_index == len(queue_jobs): curr_index = 0 if curr_worker ==", "TODO need to think about seed #./ ledock_linux_x86 dock. 
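The Vina fragments build one long command string from the box centre and size, exhaustiveness, number of modes, a fixed seed, and run-name-derived log/output files. Reassembled as a helper (the argument values below are placeholders; note that the `--log` flag exists in Vina 1.1.x but was removed in Vina 1.2+):

```python
def build_vina_command(receptor, ligand, center, size,
                       exhaustiveness, num_modes, run_name, seed=10):
    """Assemble the AutoDock Vina command line seen in the fragments.
    `center` and `size` are (x, y, z) tuples defining the search box."""
    return ("vina --receptor {0} --ligand {1} "
            "--center_x {2} --center_y {3} --center_z {4} "
            "--size_x {5} --size_y {6} --size_z {7} "
            "--exhaustiveness {8} --num_modes {9} --seed {10} "
            "--log {11}.txt --out {11}_out.pdbqt").format(
        receptor, ligand, center[0], center[1], center[2],
        size[0], size[1], size[2], exhaustiveness, num_modes, seed, run_name)

print(build_vina_command("LasR.pdbqt", "EPI.pdbqt",
                         (10.0, 12.5, -3.2), (20, 20, 20),
                         exhaustiveness=128, num_modes=9,
                         run_name="vina_samples_run_1"))
```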
in command_receptor = self.ledock_path", "# curr_LeDock += LeDock # # test = 1 # run_docking_queue += run_docking_rDock[curr_rDock:curr_rDock", "listed as FLEDIH lines in Processed_files/BTN.inp # In our example, Biotin has 5", "AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE", "# Use instances of side-chain conformers rather than using the Penultimate Rotamer Library", "self.objects_loaded = True def get_uber_g_mmpbsa_run_folder_name(self): curr_folder = os.getcwd() return curr_folder + os.sep +", "+ self.ligand_name + '.dat' generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep + self.generate_ga_dat return [generate_ga_dat,", "VARDIH 5.0 # The variation in degrees for flexible dihedrals of the ligand", "outputs import copy self.before_dask = copy.deepcopy(self.state_data) ################################################################################ if self.g_mmpbsa_prepared is True: full_g_mmpbsa_data =", "Integer in interval [1,N] where N is NUMCHROM PRINTCHR 10 ''' generate_ga_dat =", "commands and save to json :param num_samples: test value 6 :return: ''' try:", "self.receptor_name + '-' + self.ligand_name + '.dat' generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep +", "part is empty verify yolo') # # try: # self.ledock_param_title = self.ledock_data['LeDock_params']['title'] #", "open(traj_abs, 'rb') traj_mem = traj_file.read() traj_filename = data['fileSave'] data['dask'].update({'tprName': tpr_filename}) data['dask'].update({'mdpName': mdp_filename}) data['dask'].update({'indexName':", "EPI_uber_dock.flexaid_generate_ga_dat_parameters() >>> EPI_uber_dock.flexaid_generate_config_input() >>> EPI_uber_dock.prep_FlexAid_dock_run_commands() >>> EPI_uber_dock.run_FlexAid_simulation(parallel=True, waitTime=15) >>> >>> >>> # This", "1 #################################################################################################### self.run_mmpbsa_dask = run_g_mmpbsa curr_LeDock = 0 # very slow # while", "'-') if dir_name in dirname: # # print(dir_name) dir_names.append(dirname) # print sorted(dir_names) return", "final_str += pdbnam final_str += inplig final_str += rgnopt_locclf final_str += optimz1 final_str", "\"{0}.mol2\".format(self.receptor_name)) # self.output_receptor_rdock.write(self.receptor_pybel) # self.output_receptor_rdock.close() # # self.output_ligand_rdock = Outputfile(\"sd\", \"{0}.sd\".format(self.ligand_name)) # self.output_ligand_rdock.write(self.ligand_pybel", "#self.ledock_samples = self.state_data['energySoftware']['g_mmpbsa']['LeDockSample_list'] # Divide trajectory to number of free cores # TODO", "use in source and binary forms, with or without # modification, are permitted", "Scatter a lot better using scatter for big files for upload G_MMPBSA files", "self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json' # This", "list(range(len(select_frames))) folder_to_save = self.g_mmpbsa_folder temp_mdtraj = [] temp_mdtraj_indexes = [] file_save_list = []", "EPI_uber_dock.calculate_box_edges_from_com() >>> >>> >>> EPI_uber_dock.prepare_uber_docker() >>> >>> >>> #This is for rDock, and", "+ os.sep + 'EPI.pdbqt' >>> molname = 'EPI' >>> receptor_name = 'LasR' >>>", "= 'vina_' + self.run_type_samples + '_' + str(sample_num) command_to_run = \"vina --receptor {0}", "################################################################################ if self.g_mmpbsa_prepared is True: full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa'] test = 1 tpr_abs= 
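The chunking arithmetic nearby (`math.trunc(traj_len/total_free_cores)`, `select_frames = list(range(0, traj_len, div_traj))`, slices saved as `traj_part{i}.xtc`) splits the trajectory into roughly one piece per free core before farming the pieces out. A sketch of just that planning step; saving the slices needs an mdtraj Trajectory (`traj[start:start + div].save(path)`):

```python
import math
import os

def plan_trajectory_parts(traj_len, free_cores, folder):
    """Split a trajectory of traj_len frames into ~one slice per free
    core, returning the slice width and (start_frame, part_path) pairs,
    mirroring the div_traj/select_frames fragments."""
    div = max(1, math.trunc(traj_len / free_cores))
    starts = list(range(0, traj_len, div))
    return div, [(s, os.path.join(folder, "traj_part{0}.xtc".format(i)))
                 for i, s in enumerate(starts)]

if __name__ == "__main__":
    div, parts = plan_trajectory_parts(traj_len=1000, free_cores=16, folder=".")
    print(div)        # 62 frames per part
    print(parts[:2])  # first two (start_frame, path) pairs
```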
full_g_mmpbsa_data['tprFile']", "results_dask = results[key]['dask'] # else: # self.state_data['dockSoftware'][prog]['simStates'][str(sample_num)] = results[key] # if filename is", "results[key] # self.state_data['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key] self.before_dask['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key] # results_dask = results[key]['dask'] #", "curr_index = 0 curr_worker = 0 # prepare worker ids for easier switch", "generations # Integer in interval [1-N] NUMGENER 500 # Use Adaptive Genetic-Algorithm #", "list of conditions and the following # disclaimer in the documentation and/or other", "run_dock_tools.run_LeDock_sim_parallel(LeDock_sample_num, data) # # test = 1 # # test = 1 ###################################################################################################", "g_mmpbsa jobs') # TODO should I add json saving of information or not?", "Constant crossover probability # Float in interval [0.0,1.0] # Only considered when ADAPTVGA", "= data['fileSave'] data['dask'].update({'tprName': tpr_filename}) data['dask'].update({'mdpName': mdp_filename}) data['dask'].update({'indexName': index_filename}) data['dask'].update({'trajMem':traj_mem, 'trajName':traj_filename}) data['dask'].update({'tprName': tpr_filename, 'tprMem':", "# 'GA_ParamFull': self.flexaid_ga_dat_param_template}) # self.state_data_samples = self.state_data.copy() ############################################################################################## def flexaid_generate_config_input_dask(self): ''' Generate flexaid", "Float in interval [0.0-1.0] DEECLA 0.8 # Use instances of side-chain conformers rather", "+ os.sep + self.receptor_name + '_' + self.molecule_name + '.json' if filename is", "# self.state_data_samples = self.state_data.copy() ############################################################################################## def flexaid_generate_config_input_dask(self): ''' Generate flexaid config input file", "time.sleep(1) # Delay for 1 sec print('Ok %s secs have pass' % (n))", "[B,R,X] for Bissecting, Radical and Extended radical plane # See McConkey et al.", "check_dask_jobs(self, submitted_jobs_dask, finished_jobs, finished_jobs_dict): import copy # modified_submitted_jobs_dask = copy.deepcopy(submitted_jobs_dask) for i, job", "'PDBNAM ' + '{0}\\n\\n'.format( self.receptor_flexaid_mol2) # Specify the processed ligand file to use", "# How to save submitted jobs state print('-------') if curr_index == 0 and", "json.dump(filedata, open(filename, \"w\"), sort_keys=True, indent=4) # TODO should I add json saving of", "0.10 # Crossover operator # Intragenic crossovers are possible INTRAGEN # Specifies that", "information about run states # self.uber_dock_folder = self.get_uber_dock_run_folder_name() if len(self.ledock_directories) == 0: print('Creating", "from molmolpy.utils.cluster_quality import * from molmolpy.utils import folder_utils import json from molmolpy.utils import", "line #INCHOH # Permeability allowed between atoms # Float in interval [0.0,1.0] from", "[] simStates = {'simStates':{}} for i,traj in zip(select_indexes,select_frames): temp_state = {str(i):{}} temp_traj =", "test = 1 tpr_abs= full_g_mmpbsa_data['tprFile'] tpr_file = open(tpr_abs, 'rb') tpr_mem = tpr_file.read() tpr_filename", "= 0 # very slow # while len(run_docking_queue) != 40: # run_docking_queue +=", "Read more in the :ref:`User Guide <MoleculeObject>`. 
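Right here the fragments read the `.tpr`, `.mdp`, and index files into memory (`tpr_mem = tpr_file.read()`) and stash the bytes in the task payload under keys like `tprMem`, so workers without a shared filesystem can rebuild the inputs locally. A sketch of that pack/unpack pair, with this script itself standing in for an input file:

```python
import os
import tempfile

def pack_file(path):
    """Read a file's bytes so they can ride along inside a task payload,
    mirroring the tprMem/mdpMem/indexMem fragments."""
    with open(path, "rb") as handle:
        return {"name": os.path.basename(path), "mem": handle.read()}

def unpack_file(packed, workdir):
    """Worker-side inverse: write the bytes under the worker's working
    directory and return the local path for the solver to use."""
    local = os.path.join(workdir, packed["name"])
    with open(local, "wb") as handle:
        handle.write(packed["mem"])
    return local

if __name__ == "__main__":
    packed = pack_file(__file__)  # ship this script as a stand-in input
    with tempfile.TemporaryDirectory() as tmp:
        print(unpack_file(packed, tmp))
```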
Parameters ---------- filename : str, optional", "runs the main program submitted_jobs = [] submitted_jobs_dask = [] queue_jobs = self.run_mmpbsa_dask", "False: print('prep gmmpbsa ', prep_g_mmpbsa) return 'Do not prepare run files' if self.g_mmpbsa_prepared", "in flexible_bonds_data_text_list: if 'FLEDIH' in i: print(i) temp = i.split(' ') print(temp) flex_index", "is not None: # filedata = filedata # filename = self.absolute_json_state_file else: filedata", "= False # Running vina,whether it's for exhaustiveness or traditional run self.folder_path =", "print('Vina run type: {0}'.format(self.run_type_samples)) print(self.sim_folder_run_samples) folder_utils.create_folder(self.sim_folder_run_samples) self.folder_exists_samples = True self.state_data_samples.update({'receptorFile': self.receptor_file, 'ligandFile': self.ligand_file,", "# curr_FlexAid += FlexAid # # run_docking_queue += run_docking_Vina[curr_Vina:curr_Vina + Vina] # curr_Vina", "flexible_index_list_phrases: final_str += y final_str += '\\n' rmsdst = 'RMSDST ' + '{0}_ref.pdb\\n\\n'.format(", "self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list'] print('No need to generate LeDock commands') except: self.state_data['dockSoftware']['LeDock'].update({'LeDockSample_list': self.ledock_samples}) self.state_data['dockSoftware']['LeDock'].update(self.LeDock_sim_states)", "except Exception as error: pass finished_jobs.append(job) finished_jobs_dict.update({i: True}) results = job.result() test =", "+ os.sep + results_dask['contrib_apol_filename'] out_mem = results_dask['contrib_apol_mem'] out_file = open(out_name, 'w') out_file.write(out_mem) out_file.close()", "overwrite=True) self.ligand_pybel.write(\"mol2\", self.absolute_ligand_ledock_mol2, overwrite=True) self.ledock_folder_exists = True test = 1 # TODO enter", "open(out_name, 'w') out_file.write(out_mem) out_file.close() out_name = abs_folder + os.sep + results_dask['energyMM_filename'] out_mem =", "os.chdir('HSL_exhaustiveness') self.trajectory_file = self.state_data['trajectoryFile'] self.mdp_file = self.state_data['mdpFile'] self.tpr_file = self.state_data['tprFile'] self.index_file = self.state_data['indexFile']", "self.prepare_g_mmpbsa() test = 1 curr_client = dask_client # Testing Phase total_free_cores = 16", "processed ligand file to use # BTN.inp has the unique RESNUMC identifier LIG9999A", "#################################################################################################################### def prepare_samples_collection_run(self, standard_exhaust=128, num_samples_run=100, run_type='samples_run'): if self.setup_box is False: print('Please setup simulation", "filename = self.absolute_json_state_file filedata = self.state_data # elif filedata is not None: #", "submitted jobs') while len(finished_jobs) != job_quantity: finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask, finished_jobs, finished_jobs_dict) time.sleep(60)", "[1-N] NUMCHROM 500 # Number of generations # Integer in interval [1-N] NUMGENER", "next line #INCHOH # Permeability allowed between atoms # Float in interval [0.0,1.0]", "generated randomly POPINIMT RANDOM # Fitness function # Value in [LINEAR,PSHARE] FITMODEL PSHARE", "'{0}_ref.pdb\\n\\n'.format( self.ligand_flexaid_initials) final_str += rmsdst final_str += flexaid_config_input_template generate_config_input_file = 'CONFIG_' + self.receptor_name", "filename=None): ''' :param filename: Saves state file :return: ''' # import json #", "# self.ledock_zmin = 
self.ledock_data['LeDock_params']['zmin'] # self.ledock_zmax = self.ledock_data['LeDock_params']['zmax'] # # except: # print('LeDock_params", "= open(out_name, 'w') out_file.write(out_mem) out_file.close() out_name = abs_folder + os.sep + results_dask['apolar_filename'] out_mem", "are not finished for pre_job in run_mmpbsa_queue: # print(pre_job) if pre_job['runFinished'] is False:", "self.ledock_directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.ledock_folder_name) print('TADA ', self.ledock_directories) test = 1 # This will", "# elif filedata is not None: # filedata = filedata # filename =", "{}}) # # self.state_data['dockSoftware']['FlexAid']['GA_params'].update( # {'generateGA_param': self.generate_ga_dat_pameters, # 'GA_DataName': self.generate_ga_dat, # 'GA_DATA_Abs': self.generate_ga_dat_name_abs,", "= results[key] # if filename is None and filedata is None: # #", "= copy.deepcopy(results) update_results[key].pop('dask', None) # self.state_data['dockSoftware'][prog]['simStates'][str(sample_num )] = results[key] # self.state_data['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]", "math.trunc(traj_len/total_free_cores) select_frames = list(range(0,traj_len,div_traj)) select_indexes = list(range(len(select_frames))) folder_to_save = self.g_mmpbsa_folder temp_mdtraj = []", "0.95 0.10 # Constant crossover probability # Float in interval [0.0,1.0] # Only", "# original data before transformation # Add receptor name def set_mgltools_path(self, path): print('MGLTools", "= self.state_data['energySoftware']['g_mmpbsa']['fileList'] self.abs_file_save_list = self.state_data['energySoftware']['g_mmpbsa']['absFileList'] self.simStates = self.state_data['energySoftware']['g_mmpbsa']['simStates'] test = 1 self.g_mmpbsa_prepared =", "self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare'] # self.state_data['energySoftware']['g_mmpbsa'].update({'frames': self.mdtraj_frames}) # self.state_data['energySoftware']['g_mmpbsa'].update({'prepare': self.g_mmpbsa_prepared}) # self.state_data['energySoftware']['g_mmpbsa'].update({'parts': self.mdtraj_parts}) #", "------------------------------------------------------------------------ --> import itertools import time color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold', 'darkorange'])", "out_name = abs_folder + os.sep + results_dask['contrib_pol_filename'] out_mem = results_dask['contrib_pol_mem'] out_file = open(out_name,", "FlexAid] # # curr_FlexAid += FlexAid # # run_docking_queue += run_docking_Vina[curr_Vina:curr_Vina + Vina]", "variation in degrees for the anchor angle of the ligand # Float in", "self.receptor_pybel.write(\"pdb\", self.absolute_receptor_ledock_pdb, overwrite=True) self.ligand_pybel.write(\"mol2\", self.absolute_ligand_ledock_mol2, overwrite=True) self.ledock_folder_exists = True test = 1 #", "list(range(1, num_samples_run + 1)) self.run_folder_name_samples = self.receptor_name + '_' + self.molecule_name + '_'", "self.state_data = {} self.state_data_samples = {} self.g_mmpbsa_run_finished = False self.g_mmpbsa_sim_states = {'simStates': {}}", "2016-2019,<NAME>. # All rights reserved. 
# Redistribution and use in source and binary", "command_to_run, 'commandToClean':command_to_clean, 'dokFileName':ligand_clear_dok, 'runFinished': False}} self.LeDock_sim_states.update(temp_dict) self.state_data['dockSoftware']['LeDock']['simStates'].update(temp_dict) # try: # os.system(command_to_run) # except", "= self.state_data_samples['exhaustivenessList'] self.samples_run = self.state_data_samples['samplesList'] self.folder_path = self.state_data_samples['folderPath'] self.run_type = self.state_data_samples['runType'] self.molecule_name =", "get_exhaust_run_folder_name(self): curr_folder = os.getcwd() return curr_folder + os.sep + self.run_folder_name def get_samples_run_folder_name(self): curr_folder", "the initial population is generated randomly POPINIMT RANDOM # Fitness function # Value", "self.run_type self.sim_folder_run = self.folder_path + os.sep + self.run_folder_name # Create folder don't forget", "self.state_data_samples['samples_exhaust'] self.sim_folder_run_samples = self.state_data_samples['simRunFolder'] # .split('/')[-1] self.directories_samples = self.state_data_samples['directory'] self.setup_box = self.state_data_samples['setup'] self.folder_exists", ":param filename: load json state data :return: ''' # self.absolute_path = os.path.abspath(filename) self.load_state_called_samples", "# self.state_data['dockSoftware']['LeDock']['simStates'][str(LeDock_sample_num)] = data test = 1 run_g_mmpbsa.append(data) # # result = run_dock_tools.run_LeDock_sim_parallel(LeDock_sample_num,", "contacts NORMAR # Define the RMSD cutoff between clusters # Float in interval", "+ self.ligand_name + '.inp' return generate_config_input_file, final_str # self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}}) # # self.state_data['dockSoftware']['FlexAid']['GA_params'].update(", "self.run_FlexAid_sim(FlexAid_sample_num, waitTime=waitTime) data = self.state_data['energySoftware']['g_mmpbsa']['simStates'][str(part_num)] save_run_name = \"g_mmpbsa_part_{0}\".format(part_num) data.update({'Program': 'g_mmpbsa'}) data.update({'part_num': part_num}) data.update({'save_run_name':", "self.output_receptor_rdock.close() # # self.output_ligand_rdock = Outputfile(\"sd\", \"{0}.sd\".format(self.ligand_name)) # self.output_ligand_rdock.write(self.ligand_pybel ) # self.output_ligand_rdock.close() self.ledock_folder_name", "print('---' * 10) return finished_jobs, finished_jobs_dict # @hlp.timeit def run_dask_gmmpbsa(self, client=None, max_jobs_to_run=10): #", "[1-N] NUMGENER 500 # Use Adaptive Genetic-Algorithm # Value of 0 or 1", "test = 1 run_g_mmpbsa.append(data) # # result = run_dock_tools.run_LeDock_sim_parallel(LeDock_sample_num, data) # # test", "= self.find_sample_folders(self.folder_path, dir_name=self.run_type) # self.directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.rdock_folder_name) # print('TADA ', self.directories) test", "out_pdbqt_filename = out_pdbqt_name # self.state_data['dockSoftware'][prog]['simStates'][str(sample_num )] = \\ # results[key] update_results = copy.deepcopy(results)", "original_get_worker_free[curr_worker_id]['preped']['workerDir'] workstation_freemem = workstation_preped_temp['freeMemory'] workstation_freecpu = workstation_preped_temp['freeCores'] curr_item_prog = curr_item['Program'] ############################################################ # submitted_jobs_dask", "CF for ligand atoms despite including flexible side-chains #SCOLIG # Ends reading of", "+ self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples + 
'.json' #", "= queue_jobs[0] # # result = run_dask_tools.run_gmmpbsa_using_dask(job_test) test = 1 # Local upload", "# Integer in interval [1,N-1] where N is NUMCHROM STEADNUM 950 # Number", "# print('i is ', i) print('Finished checking dask submissions ---\\n') print('---' * 10)", "= np.random.randint(low_seed, high_seed) command_to_run = \"{0} {1}\".format(command_receptor, parm_name) ligand_clear_dok = sample_data['ligand_clear_name'] + '.dok'", "and the following disclaimer. # * Redistributions in binary form must reproduce the", "# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING", "self.save_run_name = 'vina_' + self.run_type_samples + '_' + str(sample_num) command_to_run = \"vina --receptor", "= get_worker_free[worker]['preped'] # total_free_cores += preped['freeCores'] if prep_g_mmpbsa is False: print('prep gmmpbsa ',", "e) sys.exit(0) @hlp.timeit def check_dask_jobs(self, submitted_jobs_dask, finished_jobs, finished_jobs_dict): import copy # modified_submitted_jobs_dask =", "Specify to use one or multiple cleft(s) as binding-site rgnopt_locclf = 'RNGOPT LOCCLF", "= self.g_mmpbsa_folder + os.sep + self.receptor_name + '_' + self.molecule_name + '_' +", "results[key]['part_num'] results_dask = results[key]['dask'] original_data = self.state_data['energySoftware'][prog] abs_folder = self.g_mmpbsa_folder # original_data['AbsFolder'] out_name", ">>> EPI_samples = '/media/Work/MEGA/Programming/StressHormones/' >>> >>> >>> receptor_file = EPI_folder + os.sep +", "self.state_data_samples = self.state_data.copy() # # self.save_state_data_json() # TODO this part needs to be", "considered when REPMODEL is BOOM BOOMFRAC 1.0 # Number of new individuals to", "# To include water molecules, uncomment the next line #INCHOH # Permeability allowed", "{0}'.format(self.run_type_samples)) print(self.sim_folder_run_samples) folder_utils.create_folder(self.sim_folder_run_samples) self.folder_exists_samples = True self.state_data_samples.update({'receptorFile': self.receptor_file, 'ligandFile': self.ligand_file, 'exhaustivenessList': self.exhaustiveness, 'samples_exhaust':", "distance between two samples for them to be considered as in the same", "spaces :return: ''' flexaid_config_input_template = '''# Optimization method (genetic-algorithms) METOPT GA # The", "self.ligand_file, 'exhaustivenessList': self.exhaustiveness, 'samples_exhaust': self.samples_exhaust, 'samplesList': self.samples_run, 'folderPath': self.folder_path, 'runType': self.run_type_samples, 'molName': self.molecule_name,", "pdb or pdbqt file. 2 types of parsers can be used: 1.molmolpy 2.", "self.ledock_samples}) self.state_data['dockSoftware']['LeDock'].update(self.LeDock_sim_states) for sample_num in self.ledock_samples: self.prep_LeDock_dock_command(sample_num) print('Now continue for LeDock:D') self.save_state_data_json() test", "', path) self.mgltools_utilities = path def set_flexaid_path(self, path): print('FlexAid path is set to", "and process structure for docking using lepro # ./lepro_linux_x86 LasR_flexaid.pdb os.chdir(self.ledock_absolute_folder_name) command_receptor =", "matrix # Float in interval [-200.0,200.0] SLVPEN 0.0 # Use Vcontacts indexing VINDEX", "used: 1.molmolpy 2. 
pybel Stores molecule information in pandas dataframe as well as", "success ---> ', i) except Exception as error: print('error is ', error) #", "= receptor_name self.run_type = 'g_mmpbsa' self.state_data = {} self.state_data_samples = {} self.g_mmpbsa_run_finished =", "1 tpr_abs= full_g_mmpbsa_data['tprFile'] tpr_file = open(tpr_abs, 'rb') tpr_mem = tpr_file.read() tpr_filename = tpr_abs.split(os.sep)[-1]", "LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND", "True def get_uber_g_mmpbsa_run_folder_name(self): curr_folder = os.getcwd() return curr_folder + os.sep + self.run_folder_name def", "set_receptor_name(self, receptor_name): self.receptor_name = receptor_name # This might need to get modified def", ":return: ''' # self.absolute_path = os.path.abspath(filename) self.load_state_called = True print(os.path.abspath(__file__)) self.state_data = json.load(open(filename,", "self.load_state_data_json(self.load_state_file) def prepare_ledock_settings(self): ''' Prepare ultraDock folder and initial json configuration >>> EPI_uber_dock.prepare_rdock_settings()", "run self.folder_path = folder_path self.command_run_list = [] self.command_samples_run_list = [] self.molecule_name = molname", "modification, are permitted provided that the following conditions are # met: # *", "and initial json configuration >>> EPI_uber_dock.prepare_rdock_settings() Convert with pybel to mol2 for receptor", "dir_name in dirname: # # print(dir_name) dir_names.append(dirname) # print sorted(dir_names) return sorted(dir_names) except", "from molmolpy.tools import run_dask_tools test = 1 curr_client = client worker_status = run_dask_tools.get_dask_worker_status(curr_client)", "MAYBE CHECK FOLDER HERE # #big_future = client.scatter(pop_item, workers=[workstation_address], hash=False) big_future = pop_item", "= copy.deepcopy(submitted_jobs_dask) for i, job in enumerate(submitted_jobs_dask): status = job.status if status ==", "10) print('Everything is finished :))))))') print('---' * 10) print('\\n') def prepare_for_dask_cluster(self, LeDock=2, rDock=2,", "# very slow # while len(run_docking_queue) != 40: # run_docking_queue += run_docking_LeDock[curr_LeDock:curr_LeDock +", "sample_num: :param pose_gen: default generate 20 poses :return: ''' try: if self.setup_ledock_pameters is", "+ run_docking_Vina final_queue_job = [] # Need to select those that are not", "when ADAPTVGA is 0 MUTARATE 0.10 # Crossover operator # Intragenic crossovers are", "error) # print('i is ', i) print('Finished checking dask submissions ---\\n') print('---' *", "USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH", "for ligand atoms despite including flexible side-chains #SCOLIG # Ends reading of CONFIG", "= 2 # # task = client.submit(run_dask_tools.run_vina_using_dask, # # data, # # workers=[work_address],", "tpr_filename, 'tprMem': tpr_mem}) data['dask'].update({'mdpName': mdp_filename, 'mdpMem': mdp_mem}) data['dask'].update({'indexName': index_filename, 'indexMem': index_mem}) test =", "traj_len = len(self.trajectory_mdtraj) import math # Free core approach div_traj = math.ceil(traj_len/total_free_cores) #", "final_queue_job.append(pre_job) test = 1 self.run_mmpbsa_dask = final_queue_job # random.shuffle(self.run_docking_queue) print('Finished preparing g_mmpbsa jobs')", "# # LeDock settings part # # self.ledock_data = self.state_data['dockSoftware']['LeDock'] # test =", "True print(os.path.abspath(__file__)) self.state_data = json.load(open(filename, \"r\")) # os.chdir('HSL_exhaustiveness') 
self.trajectory_file = self.state_data['trajectoryFile'] self.mdp_file =", ">>> >>> >>> LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True) >>> # >>> # LasR_MOR_mmpbsa_calc.run_dask_docking(client) Notes ----- See examples/cluster/plot_dbscan.py", "ligand # Float in interval [1.0-30.0] VARANG 5.0 # The variation in degrees", "# Local upload test # big_future = self.dask_prep # run_dask_tools.upload_g_mmpbsa_files_dask(big_future) #TODO # Scatter", "# # try: # self.ledock_param_title = self.ledock_data['LeDock_params']['title'] # self.rdock_title = self.ledock_data['LeDock_params']['title'] # #", "open( self.flexaid_absolute_processed_files_folder + os.sep + '{0}.inp'.format(self.ligand_flexaid_initials), 'r') flexible_bonds_data_text = flexible_bonds_data.read() flexible_bonds_data.close() flexible_bonds_data_text_list =", "about seed #./ ledock_linux_x86 dock. in command_receptor = self.ledock_path + os.sep + 'ledock_linux_x86'", "load_state_file if load_state_file is not None: self.load_state_data_json(self.load_state_file) else: print('G_MMPBSA Object has been created')", "= 'contrib_apol_{0}.dat'.format(i) temp_state[str(i)].update({'energyMM':energy_mm, 'polar':polar, 'apolar':apolar, 'contrib_MM':contrib_mm, 'contrib_pol':contrib_pol, 'contrib_apol':contrib_apol}) temp_traj.save(abs_file_save) temp_state[str(i)].update({'fileSaved': True }) simStates['simStates'].update(temp_state)", "<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``. References ---------- \"\"\" def __init__(self, traj, topol, tpr_file, mdp_file, index_file,", "Guide <MoleculeObject>`. Parameters ---------- filename : str, optional The maximum distance between two", "= self.state_data['energySoftware']['g_mmpbsa']['simStates'] #self.ledock_samples = self.state_data['energySoftware']['g_mmpbsa']['LeDockSample_list'] # Divide trajectory to number of free cores", "new Sim\") temp_dict = {str(sample_num): {'save_run_name': self.save_run_name, 'commandRun': command_to_run, 'commandToClean':command_to_clean, 'dokFileName':ligand_clear_dok, 'runFinished': False}}", "a review # This will be for leDock # if prep_g_mmpbsa is True:", "test = 1 # TODO # This part runs the main program submitted_jobs", "+ self.molecule_name + '_' + 'LeDock' self.ledock_absolute_folder_name = self.uber_dock_folder + os.sep + self.ledock_folder_name", "EPI_folder + os.sep + 'centroid_model_clust2.pdbqt' >>> ligand_file = EPI_folder + os.sep + 'EPI.pdbqt'", "# run_docking_queue += run_docking_Vina[curr_Vina:curr_Vina + Vina] # curr_Vina += Vina # # test", "self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}}) # # self.state_data['dockSoftware']['FlexAid']['GA_params'].update( # {'generateGA_param': self.generate_ga_dat_pameters, # 'GA_DataName': self.generate_ga_dat, # 'GA_DATA_Abs':", "sort_keys=True, indent=4) # TODO should I add json saving of information or not?", "self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template) self.generate_ga_dat_object_file.close() self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}}) self.state_data['dockSoftware']['FlexAid']['GA_params'].update( {'generateGA_param': self.generate_ga_dat_pameters, 'GA_DataName': self.generate_ga_dat, 'GA_DATA_Abs': self.generate_ga_dat_name_abs, 'GA_ParamFull': self.flexaid_ga_dat_param_template})", "docking :return: ''' self.flexaid_ga_dat_param_template = '''# Number of chromosomes (number individuals in the", "whole complex SCOOUT # Only calculate the CF for ligand atoms despite 
including", "shared fitness function # Floats in interval [0.0,1000.0] SHAREALF 4.0 SHAREPEK 5.0 SHARESCL", "+ self.receptor_name + '-' + self.ligand_name + '.inp' return generate_config_input_file, final_str # self.state_data['dockSoftware']['FlexAid'].update({'GA_params':", "ledock folder and process structure for docking using lepro # ./lepro_linux_x86 LasR_flexaid.pdb os.chdir(self.ledock_absolute_folder_name)", "one extra line for each flexible bond of the ligand # The allowable", "workers=[work_address], # # key='key_test', # # retries=retries_num) # # # TODO This part", "receptor_name='Unknown', folder_path='.', job_name = 'Unknown', load_state_file=None): self.load_state_file = load_state_file if load_state_file is not", "self.mdtraj_frames = select_frames self.mdtraj_sliced = temp_mdtraj self.mdtraj_parts = temp_mdtraj_indexes self.file_save_list = file_save_list self.abs_file_save_list", "out_file.close() out_name = abs_folder + os.sep + results_dask['contribMM_filename'] out_mem = results_dask['contribMM_mem'] out_file =", "# self.output_ligand_rdock = Outputfile(\"sd\", \"{0}.sd\".format(self.ligand_name)) # self.output_ligand_rdock.write(self.ligand_pybel ) # self.output_ligand_rdock.close() self.ledock_folder_name = self.receptor_name", "to run on dask server # # # # # TODO this works", "temp[-2] flexible_index_list.append(int(flex_index)) temp_line = 'OPTIMZ {0} {1} {2}\\n'.format(self.flexaid_res_number, self.flexaid_res_chain, flex_index) flexible_index_list_phrases.append(temp_line) test =", "self.flexaid_res_chain, flex_index) flexible_index_list_phrases.append(temp_line) test = 1 final_str += pdbnam final_str += inplig final_str", "self.state_data_samples['boxSettings']['size_z'] self.num_modes = self.state_data_samples['boxSettings']['numModes'] def hold_nSec(self, n): for i in range(1, n +", "'mdpMem':mdp_mem}) data_pre.update({'indexName':index_filename, 'indexMem':index_mem}) self.dask_prep = data_pre for part_num in full_g_mmpbsa_data['parts']: # self.run_FlexAid_sim(FlexAid_sample_num, waitTime=waitTime)", "need to think about seed #./ ledock_linux_x86 dock. 
in command_receptor = self.ledock_path +", "self.trajectory_file = traj self.topology_file = topol self.tpr_file = tpr_file self.mdp_file = mdp_file self.index_file", "for i, id in enumerate(get_worker_free): worker_ids.update({i: id}) custom_index_curr = 3 while len(queue_jobs) >", "# run_docking_queue += run_docking_rDock[curr_rDock:curr_rDock + rDock] # curr_rDock += rDock # # run_docking_queue", "= self.get_uber_dock_run_folder_name() if len(self.ledock_directories) == 0: print('Creating rdock folder in uberDocker folder \\n')", "+ self.molecule_name + '_' + self.run_type_samples self.sim_folder_run_samples = self.folder_path + os.sep + self.run_folder_name_samples", "= self.ledock_data['LeDock_params']['zmax'] # # except: # print('LeDock_params is empty verify yolo') # #", "'_' + self.run_type_samples self.sim_folder_run_samples = self.folder_path + os.sep + self.run_folder_name_samples # Create folder", "path is set to ', path) self.mgltools_utilities = path def set_flexaid_path(self, path): print('FlexAid", "Check of submitted jobs') while len(finished_jobs) != job_quantity: finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask, finished_jobs,", "dock protocol for LeDock, rDock,FlexAid, Vina :return: ''' current_pid = multiprocessing.current_process().pid print(\"Main Process", "= workstation_preped_temp['freeCores'] curr_item_prog = curr_item['Program'] ############################################################ # submitted_jobs_dask len less than 16 jobs_running", "'w') out_file.write(out_mem) out_file.close() # out_pdbqt_filename = out_pdbqt_name # self.state_data['dockSoftware'][prog]['simStates'][str(sample_num )] = \\ #", "results[key] # if filename is None and filedata is None: # # filename", "ran fine print('Updated receptor with LePro\\n') os.chdir(self.uber_dock_folder) self.state_data['dockSoftware']['LeDock'].update( {'receptor_pdb': self.receptor_ledock_pdb, 'ligand_mol2': self.ligand_ledock_mol2, 'lepro_pdb':", "Exclude hetero groups in the target (water,metal,modified amino acids,cofactors,ligands) # To exclude these", "# filename = self.absolute_json_state_file else: filedata = filedata filename = filename json.dump(filedata, open(filename,", "# When the value is 0.0 the solvent interactions are derived from the", "VIP = [] for dirname, dirnames, filenames in os.walk(folder): for i in filenames:", "import json from molmolpy.utils import helper as hlp # matplotlib.style.use('ggplot') sns.set(style=\"darkgrid\") low_seed =", "default generate 20 poses :return: ''' try: if self.setup_ledock_pameters is not False: #", "self.trajectory_mdtraj.topology.to_dataframe() self.objects_loaded = True def get_uber_g_mmpbsa_run_folder_name(self): curr_folder = os.getcwd() return curr_folder + os.sep", "self.ledock_folder_name test = 1 # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type) self.ledock_directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.ledock_folder_name)", "+ self.receptor_name + '-' + self.ligand_name + '.dat' self.generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep", "self.state_data['energySoftware']['g_mmpbsa'].update({'mdpFile': self.mdp_file}) self.state_data['energySoftware']['g_mmpbsa'].update({'tprFile': self.tpr_file}) self.save_state_data_json() test = 1 #self.g_mmpbsa_sim_states = self.state_data['energySoftware']['g_mmpbsa']['simStates'] #self.ledock_samples =", "# Use Adaptive Genetic-Algorithm # Value of 0 or 1 ADAPTVGA 1 #", "= run_type 
self.prep_samples_run = True self.samples_exhaust = standard_exhaust self.samples_run = list(range(1, num_samples_run +", "core approach div_traj = math.ceil(traj_len/total_free_cores) # select_indexes = list(range(total_free_cores)) # Maximum parallel #div_traj", "try: # self.setup_ledock_pameters = self.ledock_data['setup_LeDock'] # self.ledock_num_samples = self.ledock_data['num_samples'] # self.ledock_input_info = self.ledock_data['LeDockInputInfo']", "self.index_file = self.state_data['indexFile'] self.folder_path = self.state_data['folderPath'] self.run_type = self.state_data['runType'] self.molecule_name = self.state_data['molName'] self.receptor_name", "dask_client=None, prep_g_mmpbsa=True): ''' prepare dask tasks for g_mmpbsa :return: ''' self.prepare_g_mmpbsa() test =", "run states if len(self.directories_samples) == 0: print('Creating folder for vina samples run\\n') print('Vina", "need to think about seed self.save_run_name = 'vina_' + self.run_type_samples + '_' +", "self.molecule_name def get_receptor_name(self): return self.receptor_name def set_molecule_name(self, mol_name): self.molecule_name = mol_name def set_receptor_name(self,", "result = run_dask_tools.run_gmmpbsa_using_dask(job_test) test = 1 # Local upload test # big_future =", "self.mdtraj_frames = self.state_data['energySoftware']['g_mmpbsa']['frames'] self.mdtraj_parts = self.state_data['energySoftware']['g_mmpbsa']['parts'] self.file_save_list = self.state_data['energySoftware']['g_mmpbsa']['fileList'] self.abs_file_save_list = self.state_data['energySoftware']['g_mmpbsa']['absFileList'] self.simStates", "mdp_file, index_file, first_index, second_index, molname, receptor_name) >>> >>> >>> >>> LasR_MOR_mmpbsa_calc.prepare_g_mmpbsa_dask_protocol(client) >>> >>>", "data_pre.update({'indexName':index_filename, 'indexMem':index_mem}) self.dask_prep = data_pre for part_num in full_g_mmpbsa_data['parts']: # self.run_FlexAid_sim(FlexAid_sample_num, waitTime=waitTime) data", "VIP.append(i) return VIP except Exception as e: print(\"error in find_files: \", e) sys.exit(0)", "to output MAXRES 20 # Only output scored atoms in the final results", "hold information about run states if len(self.directories_samples) == 0: print('Creating folder for vina", "self.flexaid_absolute_input_folder + os.sep + self.generate_ga_dat return [generate_ga_dat, ] # self.generate_ga_dat_object_file = open(self.generate_ga_dat_name_abs, 'w')", "+ '-' + self.ligand_name + '.inp' return generate_config_input_file, final_str # self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}}) #", "= self.state_data_samples['boxSettings']['center_x'] self.y_center = self.state_data_samples['boxSettings']['center_y'] self.z_center = self.state_data_samples['boxSettings']['center_z'] self.x_size = self.state_data_samples['boxSettings']['size_x'] self.y_size =", "= i.split(' ') print(temp) flex_index = temp[-2] flexible_index_list.append(int(flex_index)) temp_line = 'OPTIMZ {0} {1}", "test = 1 self.save_run_name = \"ledock_{0}_sample_{1}\".format(self.run_type, sample_num) random_seed = np.random.randint(low_seed, high_seed) command_to_run =", "print('state has beeen loaded \\n') ############################################################################## def flexaid_generate_ga_dat_parameters(self): ''' Generate GA dat parameters", "self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list}) self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates) 
self.state_data['energySoftware']['g_mmpbsa'].update({'firstIndex': self.first_index}) self.state_data['energySoftware']['g_mmpbsa'].update({'secondIndex': self.second_index}) self.state_data['energySoftware']['g_mmpbsa'].update({'indexFile': self.index_file}) self.state_data['energySoftware']['g_mmpbsa'].update({'mdpFile': self.mdp_file}) self.state_data['energySoftware']['g_mmpbsa'].update({'tprFile': self.tpr_file})", "not prep files' traj_len = len(self.trajectory_mdtraj) import math # Free core approach div_traj", "except: # print('LeDock_params is empty verify yolo') # # try: # self.LeDock_sim_states =", "Copyright (c) 2016-2019,<NAME>. # All rights reserved. # Redistribution and use in source", "self.generate_ga_dat_pameters, # 'GA_DataName': self.generate_ga_dat, # 'GA_DATA_Abs': self.generate_ga_dat_name_abs, # 'GA_ParamFull': self.flexaid_ga_dat_param_template}) # self.state_data_samples =", "self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples self.sim_folder_run_samples = self.folder_path +", "run\\n') print(self.sim_folder_run) folder_utils.create_folder(self.sim_folder_run) self.folder_exists = True programs_dict = {'energySoftware': {'g_mmpbsa': {}}} self.state_data.update({'trajectoryFile': self.trajectory_file,", "curr_folder + os.sep + self.run_folder_name_samples def save_state_data_json(self, filedata=None, filename=None): ''' :param filename: Saves", ">>> EPI_uber_dock.flexaid_generate_ga_dat_parameters() >>> EPI_uber_dock.flexaid_generate_config_input() >>> EPI_uber_dock.prep_FlexAid_dock_run_commands() >>> EPI_uber_dock.run_FlexAid_simulation(parallel=True, waitTime=15) >>> >>> >>> #", "ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN", "################################################################################################### test = 1 #################################################################################################### self.run_mmpbsa_dask = run_g_mmpbsa curr_LeDock = 0 # very", "self.setup_box, 'folderCreated': self.folder_exists_samples, 'simStates': {}}) self.prepVinaSim_samples() self.save_state_data_json(filedata=self.state_data_samples, filename=self.json_samples_state_file) self.load_state_called_samples = False self.prep_sample_run =", "pickle.dump(self.cluster_models, open(filename, \"wb\")) # TODO create folder for run saving state run #", ">>> EPI_uber_dock.calculate_cube_edges() >>> EPI_uber_dock.calculate_box_edges_from_com() >>> >>> >>> EPI_uber_dock.prepare_uber_docker() >>> >>> >>> #This is", "degrees for the anchor angle of the ligand # Float in interval [1.0-30.0]", "''' # import json # with open(filename, 'w') as outfile: # json.dump(self.cluster_models, outfile)", "LasR_flexaid.pdb os.chdir(self.ledock_absolute_folder_name) command_receptor = self.ledock_path + os.sep + 'lepro_linux_x86' + ' {0} '.format(self.receptor_ledock_pdb)", "LeDock settings part # # self.ledock_data = self.state_data['dockSoftware']['LeDock'] # test = 1 #", "self.save_state_data_json() self.load_state_called = False else: self.load_state_file = self.json_state_file self.load_state_called = True self.load_state_data_json(self.load_state_file) def", "flexible_bonds_data_text_list = flexible_bonds_data_text.split('\\n') flexible_index_list_phrases = [] flexible_index_list = [] for i in flexible_bonds_data_text_list:", "= g_mmpbsa_dask.GMMPBSAObject(traj, topol_file, tpr_file, mdp_file, index_file, first_index, second_index, molname, receptor_name) >>> >>> >>>", "os.sep + self.receptor_name + '_' + self.molecule_name + 
'_' + self.run_type_samples + '.json'", "for vina samples run\\n') print('Vina run type: {0}'.format(self.run_type_samples)) print(self.sim_folder_run_samples) folder_utils.create_folder(self.sim_folder_run_samples) self.folder_exists_samples = True", "''' self.prepare_g_mmpbsa() test = 1 curr_client = dask_client # Testing Phase total_free_cores =", "self.state_data['dockSoftware']['LeDock'] # test = 1 # # # Try to load initial LeDock", "test = 1 # # # Try to load initial LeDock try: self.mdtraj_frames", "in interval [0.0,1.0] from fully permeable to no permeability PERMEA 0.9 # Permeability", "1 final_str += pdbnam final_str += inplig final_str += rgnopt_locclf final_str += optimz1", "vina box settings') # except Exception as e: # print(\"error in Sample runSim:", "# Side-chain rotamer acceptance threshold # Float in interval [0.0-1.0] DEECLA 0.8 #", "Float in interval [0.0,1.0] # Only considered when ADAPTVGA is 0 CROSRATE 0.90", "rgnopt_locclf final_str += optimz1 final_str += optimz2 for y in flexible_index_list_phrases: final_str +=", "ligand atoms despite including flexible side-chains #SCOLIG # Ends reading of CONFIG file", "prep finished\") else: print('Please setup vina box settings') # except Exception as e:", "degrees for flexible dihedrals of the ligand # Float in interval [1.0-30.0] VARFLX", "except Exception as e: print(\"error in find_files: \", e) sys.exit(0) def find_sample_folders(self, folder_path='.',", "@hlp.timeit def prep_LeDock_dock_run_commands(self, num_samples=10): ''' Prepare rdock run commands and save to json", "LeDock try: self.mdtraj_frames = self.state_data['energySoftware']['g_mmpbsa']['frames'] self.mdtraj_parts = self.state_data['energySoftware']['g_mmpbsa']['parts'] self.file_save_list = self.state_data['energySoftware']['g_mmpbsa']['fileList'] self.abs_file_save_list =", "# # curr_FlexAid += FlexAid # # run_docking_queue += run_docking_Vina[curr_Vina:curr_Vina + Vina] #", "file_save_list = [] abs_file_save_list = [] simStates = {'simStates':{}} for i,traj in zip(select_indexes,select_frames):", "final_str += rgnopt_locclf final_str += optimz1 final_str += optimz2 for y in flexible_index_list_phrases:", "OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF", "find_sample_files(self, folder): try: VIP = [] for dirname, dirnames, filenames in os.walk(folder): for", "self.receptor_name + '_' + self.molecule_name + '_' + 'rDock' # self.rdock_absolute_folder_name = self.uber_dock_folder", "self.receptor_name + '_' + self.ligand_name + '_LeDock Parameter file' self.ledock_rmsd = 0.5 self.set_up_ledock_dock_blind_parameters(title=self.ledock_title,", "---> ', i) except Exception as error: print('error is ', error) # print('i", "This is to run on dask server # # # # # TODO", "# print(dirname, '-') if dir_name in dirname: # # print(dir_name) dir_names.append(dirname) # print", "+ ' {0} '.format(self.receptor_ledock_pdb) os.system(command_receptor) self.lepro_pdb_file = 'pro.pdb' # Need to check whteter", "self.x_size, self.y_size, self.z_size, self.samples_exhaust, self.num_modes, self.save_run_name, self.save_run_name) print(command_to_run) self.command_samples_run_list.append(command_to_run) print(\"Launching new Sim\") self.state_data_samples['simStates'].update({str(sample_num):", "file. 2 types of parsers can be used: 1.molmolpy 2. 
pybel Stores molecule", "worker_status_temp = run_dask_tools.get_dask_worker_status(curr_client, custom_index=custom_index_curr) get_worker_free_temp = run_dask_tools.check_free_resources(worker_status_temp) custom_index_curr += 2 print('----------------TEST------------------') curr_item =", "'w') out_file.write(out_mem) out_file.close() out_name = abs_folder + os.sep + results_dask['apolar_filename'] out_mem = results_dask['apolar_mem']", "OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF", "molecule information in pandas dataframe as well as numpy list. Read more in", "+ os.sep + 'lepro_linux_x86' + ' {0} '.format(self.receptor_ledock_pdb) os.system(command_receptor) self.lepro_pdb_file = 'pro.pdb' #", "and it works so comment this part for a while >>> EPI_uber_dock.prepare_rdock_settings() >>>", "test = 1 # # try: # self.setup_ledock_pameters = self.ledock_data['setup_LeDock'] # self.ledock_num_samples =", "[1.0-30.0] VARDIH 5.0 # The variation in degrees for flexible dihedrals of the", "[BOOM,STEADY] REPMODEL BOOM # Fraction of population to create # Only considered when", "ledock_rmsd=self.ledock_rmsd, x_center=self.x_center, y_center=self.y_center, z_center=self.z_center) else: print('state has beeen loaded \\n') ############################################################################## def flexaid_generate_ga_dat_parameters(self):", "# Number of results/docking poses to output MAXRES 20 # Only output scored", "= EPI_folder + os.sep + 'EPI.pdbqt' >>> molname = 'EPI' >>> receptor_name =", "if self.setup_box is False: print('Please setup simulation box') sys.exit(0) self.run_type_samples = run_type self.prep_samples_run", "''' Prepare g_mmpbsa run folder and initial json configuration :return: ''' self.run_folder_name =", "THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE", "temp_mdtraj_indexes = [] file_save_list = [] abs_file_save_list = [] simStates = {'simStates':{}} for", "self.uber_dock_folder = self.get_uber_dock_run_folder_name() ######################################################################################## # # LeDock settings part # # self.ledock_data =", "in binary form must reproduce the above # copyright notice, this list of", "curr_index == 0 and len(submitted_jobs_dask) == 1: curr_index = 0 else: curr_index +=", ">>> molname = 'EPI' >>> receptor_name = 'LasR' >>> >>> >>> EPI_uber_dock =", "# This will be for leDock # if prep_g_mmpbsa is True: # #", "print('LeDock setting part is empty verify yolo') # # try: # self.ledock_param_title =", "= self.sim_folder_run + os.sep + self.receptor_name + '_' + self.molecule_name + '_' +", "True programs_dict = {'energySoftware': {'g_mmpbsa': {}}} self.state_data.update({'trajectoryFile': self.trajectory_file, 'mdpFile': self.mdp_file, 'tprFile': self.tpr_file, 'indexFile':", "os.sep + self.ledock_folder_name test = 1 # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type) self.ledock_directories =", "[] self.molecule_name = molname self.ligand_name = molname self.receptor_name = receptor_name self.run_type = 'g_mmpbsa'", "+= '\\n' rmsdst = 'RMSDST ' + '{0}_ref.pdb\\n\\n'.format( self.ligand_flexaid_initials) final_str += rmsdst final_str", "True self.samples_exhaust = standard_exhaust self.samples_run = list(range(1, num_samples_run + 1)) self.run_folder_name_samples = self.receptor_name", "\"--num_modes {9} \" \\ \"--seed 10 \" \\ \"--log {10}.txt \" \\ \"--out", "EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # 
LIMITED TO, PROCUREMENT OF SUBSTITUTE", "out_file.write(out_mem) out_file.close() out_name = abs_folder + os.sep + results_dask['energyMM_filename'] out_mem = results_dask['energyMM_mem'] out_file", "waitTime=waitTime) data = self.state_data['energySoftware']['g_mmpbsa']['simStates'][str(part_num)] save_run_name = \"g_mmpbsa_part_{0}\".format(part_num) data.update({'Program': 'g_mmpbsa'}) data.update({'part_num': part_num}) data.update({'save_run_name': save_run_name})", "in interval [1.0-30.0] VARDIH 5.0 # The variation in degrees for flexible dihedrals", "DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE", "= results[key]['Program'] # need [0] key sample_num = results[key]['part_num'] if prog == 'g_mmpbsa':", "in the calculations of surfaces in contact COMPLF VCT # Do not consider", "abs_folder + os.sep + results_dask['apolar_filename'] out_mem = results_dask['apolar_mem'] out_file = open(out_name, 'w') out_file.write(out_mem)", "self.g_mmpbsa_prepared}) self.state_data['energySoftware']['g_mmpbsa'].update({'parts': self.mdtraj_parts}) self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list}) self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list}) self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates) self.state_data['energySoftware']['g_mmpbsa'].update({'firstIndex': self.first_index}) self.state_data['energySoftware']['g_mmpbsa'].update({'secondIndex': self.second_index}) self.state_data['energySoftware']['g_mmpbsa'].update({'indexFile':", "# self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template) # self.generate_ga_dat_object_file.close() # # self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}}) # # self.state_data['dockSoftware']['FlexAid']['GA_params'].update( # {'generateGA_param':", "if prep_g_mmpbsa is False: print('prep gmmpbsa ', prep_g_mmpbsa) return 'Do not prepare run", "# Integer in interval [1-N] NUMCHROM 500 # Number of generations # Integer", "POPINIMT RANDOM # Fitness function # Value in [LINEAR,PSHARE] FITMODEL PSHARE # Parameters", "print('prep gmmpbsa ', prep_g_mmpbsa) return 'Do not prepare run files' if self.g_mmpbsa_prepared is", "# try: # self.ledock_param_title = self.ledock_data['LeDock_params']['title'] # self.rdock_title = self.ledock_data['LeDock_params']['title'] # # self.receptor_file_ledock", "self.samples_exhaust, 'samplesList': self.samples_run, 'folderPath': self.folder_path, 'runType': self.run_type_samples, 'molName': self.molecule_name, 'receptorName': self.receptor_name, 'simRunFolder': self.sim_folder_run_samples,", "'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat' generate_ga_dat_name_abs = self.flexaid_absolute_input_folder +", "self.first_index = first_index self.second_index = second_index self.prep_g_mmpbsa_run = False self.folder_exists = False #", "# # print(dir_name) dir_names.append(dirname) # print sorted(dir_names) return sorted(dir_names) except Exception as e:", "the RMSD cutoff between clusters # Float in interval [0.5,3.0] CLRMSD 2.0 #", "programs_dict = {'energySoftware': {'g_mmpbsa': {}}} self.state_data.update({'trajectoryFile': self.trajectory_file, 'mdpFile': self.mdp_file, 'tprFile': self.tpr_file, 'indexFile': self.index_file,", "i: VIP.append(i) # This is not necessary since info is inside pdbqt file", "self.prep_LeDock_run = True @hlp.timeit def prep_LeDock_dock_command(self, sample_num, pose_gen=20): ''' prepare each separate rDock", 
"self.state_data['dockSoftware']['LeDock']['simStates'].update(temp_dict) # try: # os.system(command_to_run) # except KeyboardInterrupt: # # quit # sys.exit()", "= workstation1_preped['workerAddress'] # # # # # This is to run on dask", "+ '_' + self.molecule_name + '_' + self.run_type + '.json' # This will", "print(os.path.abspath(__file__)) self.state_data = json.load(open(filename, \"r\")) # os.chdir('HSL_exhaustiveness') self.trajectory_file = self.state_data['trajectoryFile'] self.mdp_file = self.state_data['mdpFile']", "path) self.flexaid_path = path def set_ledock_path(self, path): print('LeDock path is set to ',", "submitted_jobs_dask len less than 16 jobs_running = len(submitted_jobs_dask) - len(finished_jobs) max_jobus = max_jobs_to_run", "self.abs_file_save_list}) # self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates) except: print('G_mmpbsa is empty verify yolo') # # test =", "self.json_samples_state_file = self.sim_folder_run_samples + os.sep + self.receptor_name + '_' + self.molecule_name + '_'", ">>> receptor_file = EPI_folder + os.sep + 'centroid_model_clust2.pdbqt' >>> ligand_file = EPI_folder +", "empty verify yolo') # # try: # self.LeDock_sim_states = self.state_data['dockSoftware']['LeDock']['simStates'] # self.ledock_samples =", "= self.ledock_data['LeDock_params']['ymax'] # self.ledock_zmin = self.ledock_data['LeDock_params']['zmin'] # self.ledock_zmax = self.ledock_data['LeDock_params']['zmax'] # # except:", "for FlexAid >>> EPI_uber_dock.prepare_flexaid_settings() >>> EPI_uber_dock.process_flexaid_ligand() >>> EPI_uber_dock.get_flexaid_clefts() >>> EPI_uber_dock.flexaid_generate_ga_dat_parameters() >>> EPI_uber_dock.flexaid_generate_config_input() >>>", "METOPT GA # The variation in degrees for the anchor angle of the", "= self.run_mmpbsa_dask job_quantity = len(queue_jobs) finished_jobs = [] finished_jobs_dict = {} worker_status_free =", "self.get_uber_g_mmpbsa_run_folder_name() self.absolute_json_state_file = self.g_mmpbsa_folder + os.sep + self.receptor_name + '_' + self.molecule_name +", "# Fraction of population to create # Only considered when REPMODEL is BOOM", "= curr_item['save_run_name'] run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id) print('Passed running ', run_name) # submitted_jobs_dask_temp, finished_jobs_temp", "= self.find_sample_folders(self.folder_path, dir_name=self.run_type) self.directories_samples = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name_samples) print('TADA ', self.directories_samples) self.json_samples_state_file = self.sim_folder_run_samples", "# Use Vcontacts in the calculations of surfaces in contact COMPLF VCT #", "Permeability for side-chain rotamer acceptance # Float in interval [0.0,1.0] from fully permeable", "add json saving of information or not? 
def load_samples_state_data_json(self, filename): ''' :param filename:", "self.receptor_name = receptor_name self.run_type = 'g_mmpbsa' self.state_data = {} self.state_data_samples = {} self.g_mmpbsa_run_finished", "self.load_state_called = True print(os.path.abspath(__file__)) self.state_data = json.load(open(filename, \"r\")) # os.chdir('HSL_exhaustiveness') self.trajectory_file = self.state_data['trajectoryFile']", "else: print('state has beeen loaded \\n') ############################################################################## def flexaid_generate_ga_dat_parameters(self): ''' Generate GA dat", "len(self.ledock_directories) == 0: print('Creating rdock folder in uberDocker folder \\n') print(self.ledock_directories) folder_utils.create_folder(self.ledock_absolute_folder_name) test", "= {str(sample_num): {'save_run_name': self.save_run_name, 'commandRun': command_to_run, 'commandToClean':command_to_clean, 'dokFileName':ligand_clear_dok, 'runFinished': False}} self.LeDock_sim_states.update(temp_dict) self.state_data['dockSoftware']['LeDock']['simStates'].update(temp_dict) #", "os.sep + self.receptor_ledock_pdb self.absolute_ligand_ledock_mol2 = self.ledock_absolute_folder_name + os.sep + self.ligand_ledock_mol2 self.receptor_pybel.write(\"pdb\", self.absolute_receptor_ledock_pdb, overwrite=True)", "term penalty # When the value is 0.0 the solvent interactions are derived", "2 print('----------------TEST------------------') curr_item = queue_jobs[curr_index] test = 1 curr_worker_id = worker_ids[curr_worker] workstation_info_temp =", "curr_worker = 0 print('-----------------------------------------------------------------') worker_status_temp = run_dask_tools.get_dask_worker_status(curr_client, custom_index=custom_index_curr) get_worker_free_temp = run_dask_tools.check_free_resources(worker_status_temp) custom_index_curr +=", "self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list}) # self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates) except: print('G_mmpbsa is empty verify yolo') # # test", "curr_folder + os.sep + self.run_folder_name def prepare_g_mmpbsa_dask_protocol(self, dask_client=None, prep_g_mmpbsa=True): ''' prepare dask tasks", "of mass :return: ''' self.trajectory_mdtraj = md.load_xtc(self.trajectory_file, top=self.topology_file) self.trajectory_mdtraj_topology = self.trajectory_mdtraj.topology self.trajectory_mdtraj_topology_dataframe =", "workstation_preped_temp['freeMemory'] workstation_freecpu = workstation_preped_temp['freeCores'] curr_item_prog = curr_item['Program'] ############################################################ # submitted_jobs_dask len less than", "curr_index = 0 else: curr_index += 1 curr_worker += 1 time.sleep(10) test =", "# # TODO This part needs further refinement # # # break #", "LeDock # # test = 1 # run_docking_queue += run_docking_rDock[curr_rDock:curr_rDock + rDock] #", "self.state_data['energySoftware']['g_mmpbsa'].update({'prepare': self.g_mmpbsa_prepared}) self.state_data['energySoftware']['g_mmpbsa'].update({'parts': self.mdtraj_parts}) self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list}) self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list}) self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates) self.state_data['energySoftware']['g_mmpbsa'].update({'firstIndex': self.first_index}) self.state_data['energySoftware']['g_mmpbsa'].update({'secondIndex': self.second_index})", "Free core approach 
div_traj = math.ceil(traj_len/total_free_cores) # select_indexes = list(range(total_free_cores)) # Maximum parallel", "self.state_data['energySoftware']['g_mmpbsa'].update({'indexFile': self.index_file}) self.state_data['energySoftware']['g_mmpbsa'].update({'mdpFile': self.mdp_file}) self.state_data['energySoftware']['g_mmpbsa'].update({'tprFile': self.tpr_file}) self.save_state_data_json() test = 1 #self.g_mmpbsa_sim_states = self.state_data['energySoftware']['g_mmpbsa']['simStates']", "G_MMPBSA files # test = 1 # tasks_upload = [] # big_future =", "molmolpy.utils.cluster_quality import * from molmolpy.utils import folder_utils import json from molmolpy.utils import helper", "test = 1 # self.rdock_folder_name = self.receptor_name + '_' + self.molecule_name + '_'", "max_jobus = max_jobs_to_run # g_mmpbsa part if curr_item_prog == 'g_mmpbsa': if workstation_freemem >", "cleft(s) as binding-site rgnopt_locclf = 'RNGOPT LOCCLF ' + 'global_binding_site.pdb\\n\\n' # Specify the", "self.state_data['dockSoftware']['LeDock'].update(self.LeDock_sim_states) for sample_num in self.ledock_samples: self.prep_LeDock_dock_command(sample_num) print('Now continue for LeDock:D') self.save_state_data_json() test =", "forget # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type) self.directories = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name) print('TADA ', self.directories)", "EPI_uber_dock.prepare_rdock_settings() >>> EPI_uber_dock.generate_rdock_cavity() >>> # Prepare and run Dock programs >>> EPI_uber_dock.prep_rDock_dock_run_commands() >>>", "calculate the CF for ligand atoms despite including flexible side-chains #SCOLIG # Ends", "self.prepare_uber_dock_protocol() for preparation >>> self.run_uber_dock_protocol() or seperately >>> EPI_uber_dock.calculate_max_radius_from_com() >>> EPI_uber_dock.calculate_cube_edges() >>> EPI_uber_dock.calculate_box_edges_from_com()", "full_g_mmpbsa_data['parts']: # self.run_FlexAid_sim(FlexAid_sample_num, waitTime=waitTime) data = self.state_data['energySoftware']['g_mmpbsa']['simStates'][str(part_num)] save_run_name = \"g_mmpbsa_part_{0}\".format(part_num) data.update({'Program': 'g_mmpbsa'}) data.update({'part_num':", "+= Vina # # test = 1 # test = 1 run_mmpbsa_queue =", "finished_jobs_dict) time.sleep(60) print('->' * 10) print('Everything is finished :))))))') print('---' * 10) print('\\n')", "# TODO create folder for run saving state run # filename = self.sim_folder_run", "# key='key_scatter_{0}'.format(worker_address), # retries=retries_num) # tasks_upload.append(task) # print(\"Starting uploading to \", worker_address) test", "rDock=2, FlexAid=2, Vina=2, parallel=False): ''' run uber dock protocol for LeDock, rDock,FlexAid, Vina", ":return: ''' # self.output_receptor_rdock = Outputfile(\"mol2\", \"{0}.mol2\".format(self.receptor_name)) # self.output_receptor_rdock.write(self.receptor_pybel) # self.output_receptor_rdock.close() # #", "run states # self.uber_dock_folder = self.get_uber_dock_run_folder_name() if len(self.ledock_directories) == 0: print('Creating rdock folder", "self.run_type_samples, 'molName': self.molecule_name, 'receptorName': self.receptor_name, 'simRunFolder': self.sim_folder_run_samples, 'directory': self.directories_samples, 'setup': self.setup_box, 'folderCreated': self.folder_exists_samples,", "'indexFile': self.index_file, 'runFolderName': self.run_folder_name, 'folderPath': self.folder_path, 'jsonStates': self.json_state_file, 'runType': self.run_type, 'molName': 
self.molecule_name, 'receptorName':", "Sim\") self.state_data_samples['simStates'].update({str(sample_num): {'save_run_name': self.save_run_name, 'commandRun': command_to_run, 'runFinished': False}}) # try: # os.system(command_to_run) #", "save as pandas dataframe Calculate pdb receptor center of mass :return: ''' self.trajectory_mdtraj", "# original_data['AbsFolder'] out_name = abs_folder + os.sep + results_dask['out_filename'] out_mem = results_dask['out_mem'] out_file", "dir_name='vina_sample'): try: dir_names = [] for dirname, dirnames, filenames in os.walk(folder_path): # print(dirname,", "'polar':polar, 'apolar':apolar, 'contrib_MM':contrib_mm, 'contrib_pol':contrib_pol, 'contrib_apol':contrib_apol}) temp_traj.save(abs_file_save) temp_state[str(i)].update({'fileSaved': True }) simStates['simStates'].update(temp_state) self.mdtraj_frames = select_frames", "increases the memory complexity to O(n.d) where d is the average number of", "STEADY # Integer in interval [1,N-1] where N is NUMCHROM STEADNUM 950 #", "# # filename = self.json_state_file # filename = self.absolute_json_state_file # filedata = self.state_data", "try: self.mdtraj_frames = self.state_data['energySoftware']['g_mmpbsa']['frames'] self.mdtraj_parts = self.state_data['energySoftware']['g_mmpbsa']['parts'] self.file_save_list = self.state_data['energySoftware']['g_mmpbsa']['fileList'] self.abs_file_save_list = self.state_data['energySoftware']['g_mmpbsa']['absFileList']", "self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json' if len(self.directories)", "between two samples for them to be considered as in the same neighborhood.", "= pop_item task_g_mmpbsa = client.submit(run_dask_tools.run_gmmpbsa_using_dask, big_future, workers=[workstation_address], key=run_name, retries=retries_num) submitted_jobs_dask.append(task_g_mmpbsa) else: key_name =", "considered if EXCHET is disabled # To include water molecules, uncomment the next", "binary forms, with or without # modification, are permitted provided that the following", "mdp_file = open(mdp_abs, 'r') mdp_mem = mdp_file.read() mdp_filename = mdp_abs.split(os.sep)[-1] index_abs= full_g_mmpbsa_data['indexFile'] index_file", "and initial json configuration :return: ''' self.run_folder_name = self.receptor_name + '_' + self.molecule_name", "the name of the molmolpy Developers nor the names of any # contributors", "not False: # print(\"Running Vina\") # TODO need to think about seed self.save_run_name", "mol_name def set_receptor_name(self, receptor_name): self.receptor_name = receptor_name # This might need to get", "+ self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json' if", "self.state_data['trajectoryFile'] self.mdp_file = self.state_data['mdpFile'] self.tpr_file = self.state_data['tprFile'] self.index_file = self.state_data['indexFile'] self.folder_path = self.state_data['folderPath']", "'EPI.pdbqt' >>> molname = 'EPI' >>> receptor_name = 'LasR' >>> run_type = 'vina_sample'", "for pre_job in run_mmpbsa_queue: # print(pre_job) if pre_job['runFinished'] is False: final_queue_job.append(pre_job) test =", "print(temp) flex_index = temp[-2] flexible_index_list.append(int(flex_index)) temp_line = 'OPTIMZ {0} {1} {2}\\n'.format(self.flexaid_res_number, self.flexaid_res_chain, flex_index)", "as in the same neighborhood. 
>>> LasR_MOR_mmpbsa_calc = g_mmpbsa_dask.GMMPBSAObject(traj, topol_file, tpr_file, mdp_file, index_file,", "self.run_folder_name def get_samples_run_folder_name(self): curr_folder = os.getcwd() print(\"Yippie yi kay\", curr_folder) return curr_folder +", "'Do not prepare run files' if self.g_mmpbsa_prepared is True: print('Do not prep files')", "same neighborhood. >>> LasR_MOR_mmpbsa_calc = g_mmpbsa_dask.GMMPBSAObject(traj, topol_file, tpr_file, mdp_file, index_file, first_index, second_index, molname,", "LeDock commands') except: self.state_data['dockSoftware']['LeDock'].update({'LeDockSample_list': self.ledock_samples}) self.state_data['dockSoftware']['LeDock'].update(self.LeDock_sim_states) for sample_num in self.ledock_samples: self.prep_LeDock_dock_command(sample_num) print('Now continue", ":param sample_num: :param pose_gen: default generate 20 poses :return: ''' try: if self.setup_ledock_pameters", "copy self.before_dask = copy.deepcopy(self.state_data) ################################################################################ if self.g_mmpbsa_prepared is True: full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa'] test", "1 sec print('Ok %s secs have pass' % (n)) @hlp.timeit def prepVinaSampleCommand(self, sample_num):", "+ os.sep + results_dask['polar_filename'] out_mem = results_dask['polar_mem'] out_file = open(out_name, 'w') out_file.write(out_mem) out_file.close()", "in get_worker_free: # worker_info = get_worker_free[worker] # worker_address = worker_info['preped']['workerAddress'] # # retries_num", "'FLEDIH' in i: print(i) temp = i.split(' ') print(temp) flex_index = temp[-2] flexible_index_list.append(int(flex_index))", "index_filename}) data['dask'].update({'trajMem':traj_mem, 'trajName':traj_filename}) data['dask'].update({'tprName': tpr_filename, 'tprMem': tpr_mem}) data['dask'].update({'mdpName': mdp_filename, 'mdpMem': mdp_mem}) data['dask'].update({'indexName': index_filename,", "self.folder_exists_samples = True self.state_data_samples.update({'receptorFile': self.receptor_file, 'ligandFile': self.ligand_file, 'exhaustivenessList': self.exhaustiveness, 'samples_exhaust': self.samples_exhaust, 'samplesList': self.samples_run,", "Object has been created') self.trajectory_file = traj self.topology_file = topol self.tpr_file = tpr_file", "'g_mmpbsa': sample_num = results[key]['part_num'] results_dask = results[key]['dask'] original_data = self.state_data['energySoftware'][prog] abs_folder = self.g_mmpbsa_folder", "of the shared fitness function # Floats in interval [0.0,1000.0] SHAREALF 4.0 SHAREPEK", "to get modified def find_sample_files(self, folder): try: VIP = [] for dirname, dirnames,", "update_results[key] # results_dask = results[key]['dask'] # else: # self.state_data['dockSoftware'][prog]['simStates'][str(sample_num)] = results[key] # if", "self.directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.rdock_folder_name) # print('TADA ', self.directories) test = 1 # This", "hetero groups in the target (water,metal,modified amino acids,cofactors,ligands) # To exclude these groups,", "data['dask'].update({'mdpName': mdp_filename}) data['dask'].update({'indexName': index_filename}) data['dask'].update({'trajMem':traj_mem, 'trajName':traj_filename}) data['dask'].update({'tprName': tpr_filename, 'tprMem': tpr_mem}) data['dask'].update({'mdpName': mdp_filename, 'mdpMem':", "= abs_file_save_list self.simStates = simStates test = 1 self.g_mmpbsa_prepared = True self.state_data['energySoftware']['g_mmpbsa'].update({'frames': 
self.mdtraj_frames})", "run\\n') print('Vina run type: {0}'.format(self.run_type_samples)) print(self.sim_folder_run_samples) folder_utils.create_folder(self.sim_folder_run_samples) self.folder_exists_samples = True self.state_data_samples.update({'receptorFile': self.receptor_file, 'ligandFile':", "+ os.sep + self.ligand_ledock_mol2 self.receptor_pybel.write(\"pdb\", self.absolute_receptor_ledock_pdb, overwrite=True) self.ligand_pybel.write(\"mol2\", self.absolute_ligand_ledock_mol2, overwrite=True) self.ledock_folder_exists = True", "the above copyright # notice, this list of conditions and the following disclaimer.", "'LeDockFolderName': self.ledock_folder_name}) self.save_state_data_json() self.load_state_called = False self.ledock_title = self.receptor_name + '_' + self.ligand_name", "ADAPTVGA 1 # Adaptive crossover and mutation probabilities # Floats in interval [0.0,1.0]", "self.generate_ga_dat, 'GA_DATA_Abs': self.generate_ga_dat_name_abs, 'GA_ParamFull': self.flexaid_ga_dat_param_template}) # self.state_data_samples = self.state_data.copy() self.save_state_data_json() # TODO this", "self.load_samples_state_data_json(self.load_state_file_samples) self.prep_sample_run = True def get_exhaust_run_folder_name(self): curr_folder = os.getcwd() return curr_folder + os.sep", "if status == 'finished': test = 1 # pop_item = modified_submitted_jobs_dask.pop(i) try: if", "molname = 'EPI' >>> receptor_name = 'LasR' >>> >>> >>> EPI_uber_dock = uber_docker.UberDockerObject(receptor_file,", "self.output_receptor_rdock = Outputfile(\"mol2\", \"{0}.mol2\".format(self.receptor_name)) # self.output_receptor_rdock.write(self.receptor_pybel) # self.output_receptor_rdock.close() # # self.output_ligand_rdock = Outputfile(\"sd\",", "folder_utils.create_folder(self.sim_folder_run_samples) self.folder_exists_samples = True self.state_data_samples.update({'receptorFile': self.receptor_file, 'ligandFile': self.ligand_file, 'exhaustivenessList': self.exhaustiveness, 'samples_exhaust': self.samples_exhaust, 'samplesList':", "self.state_data_samples['molName'] self.receptor_name = self.state_data_samples['receptorName'] # TODO test self.samples_exhaust = self.state_data_samples['samples_exhaust'] self.sim_folder_run_samples = self.state_data_samples['simRunFolder']", "self.json_state_file = self.state_data['jsonStates'] test = 1 # self.rdock_folder_name = self.receptor_name + '_' +", "mdp_file self.index_file = index_file self.first_index = first_index self.second_index = second_index self.prep_g_mmpbsa_run = False", "the above # copyright notice, this list of conditions and the following #", "json saving of information or not? def load_state_data_json(self, filename): ''' :param filename: load", "all neighborhood queries, which increases the memory complexity to O(n.d) where d is", "#ROTOBS # Defines the grid spacing of the binding-site # Float in interval", "THE POSSIBILITY OF SUCH DAMAGE. 
# ------------------------------------------------------------------------ --> import itertools import time color_iter", "EPI_uber_dock.set_up_Vina_Box() >>> EPI_uber_dock.prepare_Vina_run() >>> EPI_uber_dock.prepVinaSim_uberDock() >>> EPI_uber_dock.runVinaSim_uber() Molecule object loading of pdb and", "os.sep + self.rdock_folder_name # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type) # self.directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.rdock_folder_name)", "---------- filename : str, optional The maximum distance between two samples for them", "tpr_file, mdp_file, index_file, first_index, second_index, molname='Unknown', receptor_name='Unknown', folder_path='.', job_name = 'Unknown', load_state_file=None): self.load_state_file", "self.mgltools_utilities = path def set_flexaid_path(self, path): print('FlexAid path is set to ', path)", "i, job in enumerate(submitted_jobs_dask): status = job.status if status == 'finished': test =", "threads, one checks finished simulations, other submits jobs ############################################################################################### gmmbpsa_min_mem = 1000 retries_num", "0 # prepare worker ids for easier switch worker_ids = {} for i,", "states # self.uber_dock_folder = self.get_uber_dock_run_folder_name() ######################################################################################## # # LeDock settings part # #", "'w') out_file.write(out_mem) out_file.close() out_name = abs_folder + os.sep + results_dask['contrib_apol_filename'] out_mem = results_dask['contrib_apol_mem']", "test value 6 :return: ''' try: self.g_mmpbsa_sim_states = self.state_data['dockSoftware']['LeDock']['simStates'] self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list'] print('No", "self.absolute_receptor_ledock_pdb, 'abs_ligand_mol2': self.absolute_ligand_ledock_mol2, 'LeDockFolderStatus': self.ledock_folder_exists, 'LeDockAbsFolder': self.ledock_absolute_folder_name, 'LeDockFolderName': self.ledock_folder_name}) self.save_state_data_json() self.load_state_called = False", "# Fitness function # Value in [LINEAR,PSHARE] FITMODEL PSHARE # Parameters of the", "= results_dask['energyMM_mem'] out_file = open(out_name, 'w') out_file.write(out_mem) out_file.close() out_name = abs_folder + os.sep", "plane definition # Value in [B,R,X] for Bissecting, Radical and Extended radical plane", "\\ \"--exhaustiveness {8} \" \\ \"--num_modes {9} \" \\ \"--seed 10 \" \\", "mdtraj as md from molmolpy.utils.cluster_quality import * from molmolpy.utils import folder_utils import json", "os.getcwd() return curr_folder + os.sep + self.run_folder_name def prepare_g_mmpbsa_dask_protocol(self, dask_client=None, prep_g_mmpbsa=True): ''' prepare", "'exhaustivenessList': self.exhaustiveness, 'samples_exhaust': self.samples_exhaust, 'samplesList': self.samples_run, 'folderPath': self.folder_path, 'runType': self.run_type_samples, 'molName': self.molecule_name, 'receptorName':", "sample_num = results[key]['part_num'] if prog == 'g_mmpbsa': sample_num = results[key]['part_num'] results_dask = results[key]['dask']", "!/usr/bin/env python # # @file __init__.py # @brief G_MMPBSA DASK PROJECT # @author", "verify yolo') # # test = 1 # # try: # self.setup_ledock_pameters =", "consider intramolecular interactions NOINTR # Side-chain rotamer acceptance threshold # Float in interval", "# TODO test self.samples_exhaust = self.state_data_samples['samples_exhaust'] self.sim_folder_run_samples = 
class GMMPBSAObject(object):
    """
    Usage example


    >>> EPI_folder = '/media/Work/MEGA/Programming/StressHormones/dock_EPI'
    >>> EPI_samples = '/media/Work/MEGA/Programming/StressHormones/'
    >>>
    >>> receptor_file = EPI_folder + os.sep + 'centroid_model_clust2.pdbqt'
    >>> ligand_file = EPI_folder + os.sep + 'EPI.pdbqt'
    >>> molname = 'EPI'
    >>> receptor_name = 'LasR'
    >>> run_type = 'vina_sample'
    >>>
    >>> EPI_uber_dock = uber_docker.UberDockerObject(receptor_file, ligand_file, '.',
    >>>                                              molname=molname, receptor_name=receptor_name)
    >>>
    >>> EPI_uber_dock.prepare_uber_dock_protocol()
    >>> EPI_uber_dock.run_uber_dock_protocol()

    Use together
    >>> self.prepare_uber_dock_protocol() for preparation
    >>> self.run_uber_dock_protocol() or separately
    >>> EPI_uber_dock.calculate_max_radius_from_com()

    This is to run Dock programs
    >>> EPI_uber_dock.prep_rDock_dock_run_commands()
    >>> EPI_uber_dock.run_rDock_simulation(parallel=True, waitTime=15)
    >>>
    >>> # This is for FlexAid
    >>> EPI_uber_dock.prepare_flexaid_settings()
    >>> EPI_uber_dock.process_flexaid_ligand()
    >>> EPI_uber_dock.get_flexaid_clefts()
    >>> EPI_uber_dock.flexaid_generate_ga_dat_parameters()
    >>> EPI_uber_dock.flexaid_generate_config_input()
    >>> EPI_uber_dock.prep_FlexAid_dock_run_commands()
    >>> EPI_uber_dock.run_FlexAid_simulation(parallel=True, waitTime=15)
    >>>
    >>> # This is for Autodock Vina
    >>> EPI_uber_dock.set_up_Vina_Box()
    >>> EPI_uber_dock.prepare_Vina_run()
    >>> EPI_uber_dock.prepVinaSim_uberDock()
    >>> EPI_uber_dock.runVinaSim_uber()

    >>> LasR_MOR_mmpbsa_calc = g_mmpbsa_dask.GMMPBSAObject(traj, topol_file, tpr_file, mdp_file,
    >>>                                                    index_file, first_index, second_index,
    >>>                                                    molname, receptor_name)
    >>>
    >>> LasR_MOR_mmpbsa_calc.prepare_g_mmpbsa_dask_protocol(client)
    >>>
    >>> LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True)
    >>> # LasR_MOR_mmpbsa_calc.run_dask_docking(client)

    Molecule object loading of pdb and pdbqt file formats.
    Then converts to pandas dataframe.

    Create MoleculeObject by parsing pdb or pdbqt file.
    2 types of parsers can be used: 1. molmolpy 2. pybel.
    Stores molecule information in pandas dataframe as well as numpy list.

    Read more in the :ref:`User Guide <MoleculeObject>`.

    Parameters
    ----------
    filename : str, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.

    Notes
    -----
    This implementation bulk-computes all neighborhood queries, which
    increases the memory complexity to O(n.d) where d is the average number
    of neighbors; for larger datasets, neighborhoods can be precomputed using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
    with ``mode='distance'``.

    References
    ----------
    """
    def __init__(self, traj, topol, tpr_file, mdp_file, index_file,
                 first_index, second_index,
                 molname='Unknown',
                 receptor_name='Unknown',
                 folder_path='.',
                 job_name='Unknown',
                 load_state_file=None):

        self.load_state_file = load_state_file

        if load_state_file is not None:
            self.load_state_data_json(self.load_state_file)
        else:
            print('G_MMPBSA Object has been created')

            self.trajectory_file = traj
            self.topology_file = topol

            self.tpr_file = tpr_file
            self.mdp_file = mdp_file
            self.index_file = index_file

            self.first_index = first_index
            self.second_index = second_index

            self.prep_g_mmpbsa_run = False

            self.g_mmpbsa_sim_states = {'simStates': {}}

            self.objects_loaded = False
            self.g_mmpbsa_prepared = False

            # This part needs clarification
            self.prep_mdtraj_object()

            self.folder_exists = False

            # Running vina, whether it's for exhaustiveness or traditional run
            self.folder_path = folder_path

            self.command_run_list = []
            self.command_samples_run_list = []

            self.molecule_name = molname
            self.receptor_name = receptor_name

            self.run_type = 'g_mmpbsa'
            self.job_name = job_name

            self.state_data = {}
            self.state_data_samples = {}
    def get_molecule_name(self):
        return self.molecule_name

    def get_receptor_name(self):
        return self.receptor_name

    def set_molecule_name(self, mol_name):
        self.molecule_name = mol_name

    # original data before transformation
    # Add receptor name

    def set_mgltools_path(self, path):
        print('MGLTools path is set to ', path)
        self.mgltools_utilities = path

    def set_flexaid_path(self, path):
        print('FlexAid path is set to ', path)
        self.flexaid_path = path

    def set_ledock_path(self, path):
        print('LeDock path is set to ', path)
        self.ledock_path = path

    def prep_mdtraj_object(self):
        '''
        Prepare receptor mdtraj object

        get mdtraj topology and save as pandas dataframe

        Calculate pdb receptor center of mass

        :return:
        '''
        self.trajectory_mdtraj = md.load_xtc(self.trajectory_file, top=self.topology_file)

    def hold_nSec(self, n):
        for i in range(1, n + 1):
            print(i)
            time.sleep(1)  # Delay for 1 sec
        print('Ok %s' % (n))

    def find_sample_files(self, folder):
        try:
            VIP = []
            for dirname, dirnames, filenames in os.walk(folder):
                for i in filenames:
                    # print i
                    if 'out' in i:
                        VIP.append(i)
                    # This is not necessary, since info is inside pdbqt file
                    # elif 'vina_sample_' in i:
                    #     VIP.append(i)
            return VIP
        except Exception as e:
            print("error in find_sample_files: ", e)
            sys.exit(0)

    def find_sample_folders(self, folder_path='.', dir_name='vina_sample'):
        try:
            dir_names = []
            for dirname, dirnames, filenames in os.walk(folder_path):
                # print(dirname, '-')
                if dir_name in dirname:
                    # print(dir_name)
                    dir_names.append(dirname)
            return sorted(dir_names)
        except Exception as e:
            print("error in find_sample_folders: ", e)
            sys.exit(0)

    def get_exhaust_run_folder_name(self):
        curr_folder = os.getcwd()
        return curr_folder + os.sep + self.run_folder_name

    def get_samples_run_folder_name(self):
        curr_folder = os.getcwd()
        print("Yippie yi kay", curr_folder)
        return curr_folder + os.sep + self.run_folder_name_samples
    def save_state_data_json(self, filedata=None, filename=None):
        '''
        :param filename: Saves state file
        :return:
        '''
        # import json
        # with open(filename, 'w') as outfile:
        #     json.dump(self.cluster_models, outfile)
        # pickle.dump(self.cluster_models, open(filename, "wb"))
        # TODO create folder for run saving state run
        # filename = self.sim_folder_run + os.sep + self.receptor_name + '_' + self.molecule_name + '.json'
        if filename is None and filedata is None:
            # filename = self.json_state_file
            filename = self.absolute_json_state_file
            filedata = self.state_data
        # elif filedata is not None:
        #     filedata = filedata
        #     filename = self.absolute_json_state_file
        else:
            filedata = filedata
            filename = filename
        json.dump(filedata, open(filename, "w"), sort_keys=True, indent=4)
    # TODO should I add json saving of information or not?
    def load_state_data_json(self, filename):
        '''
        :param filename: load json state data
        :return:
        '''
        # self.absolute_path = os.path.abspath(filename)
        self.load_state_called = True

        print(os.path.abspath(__file__))
        self.state_data = json.load(open(filename, "r"))
        # os.chdir('HSL_exhaustiveness')

        self.trajectory_file = self.state_data['trajectoryFile']
        self.mdp_file = self.state_data['mdpFile']
        self.tpr_file = self.state_data['tprFile']
        self.index_file = self.state_data['indexFile']

        self.molecule_name = self.state_data['molName']
        self.receptor_name = self.state_data['receptorName']

        # TODO test
        self.sim_folder_run = self.state_data['simRunFolder']  # .split('/')[-1]
        self.directories = self.state_data['directory']
        self.folder_exists = self.state_data['folderCreated']
        self.absolute_json_state_file = self.state_data['absoluteJsonStates']
        self.g_mmpbsa_folder = self.state_data['RunFolder']
        self.json_state_file = self.state_data['jsonStates']
        test = 1

        # This will hold information about run states
        # self.uber_dock_folder = self.get_uber_dock_run_folder_name()

        ########################################################################################
        # LeDock settings part
        # self.ledock_data = self.state_data['dockSoftware']['LeDock']
        # try:
        #     self.setup_ledock_pameters = self.ledock_data['setup_LeDock']
        #     self.ledock_num_samples = self.ledock_data['num_samples']
        #     self.ledock_input_info = self.ledock_data['LeDockInputInfo']
        #     self.param_ledock_template = self.ledock_data['paramFull']
        # except:
        #     print('LeDock setting not loaded, verify yolo')
        #
        # try:
        #     self.LeDock_sim_states = self.state_data['dockSoftware']['LeDock']['simStates']
        #     self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list']
        #     print('No need to generate LeDock commands')
        #     self.prep_LeDock_run = True
        # except:
        #     print('LeDock_params simStates is empty verify yolo')

        # Try to load initial LeDock
        try:
            self.mdtraj_frames = self.state_data['energySoftware']['g_mmpbsa']['frames']
            self.mdtraj_parts = self.state_data['energySoftware']['g_mmpbsa']['parts']
            self.file_save_list = self.state_data['energySoftware']['g_mmpbsa']['fileList']
            self.abs_file_save_list = self.state_data['energySoftware']['g_mmpbsa']['absFileList']
            self.simStates = self.state_data['energySoftware']['g_mmpbsa']['simStates']
            self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare']
            test = 1
        except:
            print('g_mmpbsa state is empty verify yolo')

    # TODO should I add json saving of information or not?
    def load_samples_state_data_json(self, filename):
        '''
        :param filename: load json state data for sample runs
        :return:
        '''
        self.load_state_called_samples = True
        print(os.path.abspath(__file__))
        self.state_data_samples = json.load(open(filename, "r"))
        # os.chdir('HSL_exhaustiveness')

        self.receptor_file = self.state_data_samples['receptorFile']
        self.ligand_file = self.state_data_samples['ligandFile']
        self.exhaustiveness = self.state_data_samples['exhaustivenessList']
        self.samples_run = self.state_data_samples['samplesList']
        self.folder_path = self.state_data_samples['folderPath']
        self.run_type = self.state_data_samples['runType']
        self.molecule_name = self.state_data_samples['molName']
        self.receptor_name = self.state_data_samples['receptorName']

        # TODO test
        self.samples_exhaust = self.state_data_samples['samples_exhaust']
        self.sim_folder_run_samples = self.state_data_samples['simRunFolder']  # .split('/')[-1]
        self.directories_samples = self.state_data_samples['directory']
        self.setup_box = self.state_data_samples['setup']
        self.folder_exists = self.state_data_samples['folderCreated']

        self.x_center = self.state_data_samples['boxSettings']['center_x']
        self.y_center = self.state_data_samples['boxSettings']['center_y']
        self.z_center = self.state_data_samples['boxSettings']['center_z']
        self.x_size = self.state_data_samples['boxSettings']['size_x']
        self.y_size = self.state_data_samples['boxSettings']['size_y']
        self.z_size = self.state_data_samples['boxSettings']['size_z']
        self.num_modes = self.state_data_samples['boxSettings']['numModes']
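    # ------------------------------------------------------------------
    # Illustrative sketch (not part of the original molmolpy API): the
    # state-file pattern above is a plain JSON round-trip over a nested
    # dict. `demo_state_roundtrip`, its argument and the example keys are
    # assumptions made for illustration only; the keys mirror the ones
    # used by save_state_data_json / load_state_data_json.
    @staticmethod
    def demo_state_roundtrip(state_path='state_demo.json'):
        state = {'molName': 'EPI',
                 'receptorName': 'LasR',
                 'energySoftware': {'g_mmpbsa': {'simStates': {}}}}
        json.dump(state, open(state_path, 'w'), sort_keys=True, indent=4)
        reloaded = json.load(open(state_path, 'r'))
        return reloaded['molName'], reloaded['energySoftware']['g_mmpbsa']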
    def prepare_samples_collection_run(self, standard_exhaust, num_samples_run, run_type):
        if self.setup_box is False:
            print('Please setup simulation box')
            sys.exit(0)

        self.run_type_samples = run_type
        self.prep_samples_run = True

        self.samples_exhaust = standard_exhaust
        self.samples_run = list(range(1, num_samples_run + 1))

        self.run_folder_name_samples = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples
        self.sim_folder_run_samples = self.folder_path + os.sep + self.run_folder_name_samples

        # Exhaustiveness for all samples
        # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
        self.directories_samples = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name_samples)
        print('TADA ', self.directories_samples)

        self.json_samples_state_file = self.sim_folder_run_samples + os.sep + \
            self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples + '.json'

        # This will hold information about run states
        if len(self.directories_samples) == 0:
            print('Creating folder for vina samples run\n')
            print('Vina run type: {0}'.format(self.run_type_samples))
            print(self.sim_folder_run_samples)
            folder_utils.create_folder(self.sim_folder_run_samples)
            self.folder_exists_samples = True

            self.state_data_samples.update({'receptorFile': self.receptor_file,
                                            'ligandFile': self.ligand_file,
                                            'exhaustivenessList': self.exhaustiveness,
                                            'samples_exhaust': self.samples_exhaust,
                                            'samplesList': self.samples_run,
                                            'folderPath': self.folder_path,
                                            'runType': self.run_type_samples,
                                            'molName': self.molecule_name,
                                            'receptorName': self.receptor_name,
                                            'simRunFolder': self.sim_folder_run_samples,
                                            'directory': self.directories_samples,
                                            'setup': self.setup_box,
                                            'folderCreated': self.folder_exists_samples,
                                            'simStates': {}})

            self.save_state_data_json(filedata=self.state_data_samples,
                                      filename=self.json_samples_state_file)
            self.load_state_called_samples = False
            self.prep_sample_run = True
        else:
            self.load_state_file_samples = self.json_samples_state_file
            self.load_state_called_samples = True
            self.load_samples_state_data_json(self.load_state_file_samples)
            self.prep_sample_run = True

    def prepare_g_mmpbsa(self):
        '''
        Prepare g_mmpbsa run folder and initial json configuration
        :return:
        '''
        self.run_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type
        self.sim_folder_run = self.folder_path + os.sep + self.run_folder_name

        # Create folder don't forget
        # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
        self.directories = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name)
        print('TADA ', self.directories)

        self.json_state_file = self.sim_folder_run + os.sep + \
            self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json'

        if len(self.directories) == 0:
            print('Creating folder for g_mmpbsa run\n')
            print(self.sim_folder_run)
            folder_utils.create_folder(self.sim_folder_run)
            self.folder_exists = True

            programs_dict = {'energySoftware': {'g_mmpbsa': {}}}

            self.g_mmpbsa_folder = self.get_g_mmpbsa_run_folder_name()
            self.absolute_json_state_file = self.g_mmpbsa_folder + os.sep + \
                self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json'

            self.state_data.update({'trajectoryFile': self.trajectory_file,
                                    'mdpFile': self.mdp_file,
                                    'tprFile': self.tpr_file,
                                    'indexFile': self.index_file,
                                    'molName': self.molecule_name,
                                    'receptorName': self.receptor_name,
                                    'simRunFolder': self.sim_folder_run,
                                    'RunFolder': self.g_mmpbsa_folder,
                                    'absoluteJsonStates': self.absolute_json_state_file,
                                    'jsonStates': self.json_state_file,
                                    'directory': self.directories,
                                    'folderCreated': self.folder_exists,
                                    'simStates': {}})
            self.state_data.update(programs_dict)

            # self.prepVinaSim_exhaust()
            self.save_state_data_json()
            self.load_state_called = False
        else:
            self.load_state_file = self.json_state_file
            self.load_state_called = True
            self.load_state_data_json(self.load_state_file)

    def get_g_mmpbsa_run_folder_name(self):
        curr_folder = os.getcwd()
        return curr_folder + os.sep + self.run_folder_name
    def prepare_g_mmpbsa_dask_protocol(self, dask_client=None, prep_g_mmpbsa=True):
        '''
        prepare dask tasks for g_mmpbsa
        :return:
        '''
        self.prepare_g_mmpbsa()

        # This should be for leDock
        # if prep_g_mmpbsa is True:
        #     self.prepare_uber_docker()
        #     self.prepare_ledock_settings()
        #     self.prep_LeDock_dock_run_commands()

        test = 1
        curr_client = dask_client

        # Testing Phase
        total_free_cores = 16

        # Production
        # worker_status = run_dask_tools.get_dask_worker_status(curr_client)
        # get_worker_free = run_dask_tools.check_free_resources(worker_status)
        # test = 1

        if self.g_mmpbsa_prepared is True:
            print('Do not prep files')
            return 'Do not prep files'

        traj_len = len(self.trajectory_mdtraj)

        import math
        # Free core approach
        div_traj = math.trunc(traj_len / total_free_cores)
        # Maximum parallel
        # div_traj = math.trunc(traj_len/total_free_cores)

        select_frames = list(range(0, traj_len, div_traj))
        select_indexes = list(range(len(select_frames)))

        folder_to_save = self.g_mmpbsa_folder

        temp_mdtraj = []
        temp_mdtraj_indexes = []
        file_save_list = []
        abs_file_save_list = []

        simStates = {'simStates': {}}

        for i, traj in zip(select_indexes, select_frames):
            temp_state = {str(i): {}}

            temp_traj = self.trajectory_mdtraj[traj:traj + div_traj]
            temp_mdtraj.append(temp_traj)
            temp_mdtraj_indexes.append(i)

            file_save = 'traj_part{0}.xtc'.format(i)
            abs_file_save = folder_to_save + os.sep + file_save
            file_save_list.append(file_save)
            abs_file_save_list.append(abs_file_save)

            temp_state[str(i)].update({'runFinished': False,
                                       'index': i,
                                       'absFolder': folder_to_save,
                                       'fileSave': file_save,
                                       'absFileSave': abs_file_save,
                                       'firstIndex': self.first_index,
                                       'secondIndex': self.second_index,
                                       'indexFile': self.index_file,
                                       'mdpFile': self.mdp_file,
                                       'tprFile': self.tpr_file})

            energy_mm = 'energy_MM_{0}.xvg'.format(i)
            polar = 'polar_{0}.xvg'.format(i)
            apolar = 'apolar_{0}.xvg'.format(i)
            contrib_mm = 'contrib_MM_{0}.dat'.format(i)
            contrib_pol = 'contrib_pol_{0}.dat'.format(i)
            contrib_apol = 'contrib_apol_{0}.dat'.format(i)

            temp_state[str(i)].update({'energyMM': energy_mm,
                                       'polar': polar,
                                       'apolar': apolar,
                                       'contrib_MM': contrib_mm,
                                       'contrib_pol': contrib_pol,
                                       'contrib_apol': contrib_apol})

            temp_traj.save(abs_file_save)
            temp_state[str(i)].update({'fileSaved': True})
            simStates['simStates'].update(temp_state)

        self.mdtraj_frames = select_frames
        self.mdtraj_sliced = temp_mdtraj
        self.mdtraj_parts = temp_mdtraj_indexes
        self.file_save_list = file_save_list
        self.abs_file_save_list = abs_file_save_list
        self.simStates = simStates
        test = 1

        self.g_mmpbsa_prepared = True

        self.state_data['energySoftware']['g_mmpbsa'].update({'frames': self.mdtraj_frames,
                                                              'prepare': self.g_mmpbsa_prepared,
                                                              'parts': self.mdtraj_parts,
                                                              'fileList': self.file_save_list,
                                                              'absFileList': self.abs_file_save_list,
                                                              'firstIndex': self.first_index,
                                                              'secondIndex': self.second_index,
                                                              'indexFile': self.index_file,
                                                              'mdpFile': self.mdp_file,
                                                              'tprFile': self.tpr_file})
        self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates)

        self.save_state_data_json()
        test = 1
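    # ------------------------------------------------------------------
    # Minimal sketch of the chunking idea used above, assuming only mdtraj:
    # slice a loaded trajectory into roughly equal parts and write each part
    # to its own .xtc so independent g_mmpbsa jobs can consume them.
    # `demo_split_trajectory` is illustrative, not part of molmolpy.
    @staticmethod
    def demo_split_trajectory(trajectory_mdtraj, n_parts, folder_to_save='.'):
        import math
        chunk = max(1, math.trunc(len(trajectory_mdtraj) / n_parts))
        paths = []
        for i, start in enumerate(range(0, len(trajectory_mdtraj), chunk)):
            part = trajectory_mdtraj[start:start + chunk]  # slicing returns a Trajectory
            path = os.path.join(folder_to_save, 'traj_part{0}.xtc'.format(i))
            part.save(path)  # output format inferred from the .xtc extension
            paths.append(path)
        return paths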
    def prepare_for_dask_cluster(self, parallel=False):
        '''
        Prepare in-memory payloads for the dask cluster: the shared tpr, mdp
        and index files are read once, then each trajectory part gets its own
        payload dict under data['dask'], analogous to the queue prep for Vina
        :return:
        '''
        current_pid = multiprocessing.current_process().pid
        print("Main Process with PID:{}".format(current_pid))

        # free_threads_for_Vina = num_threads - LeDock-rDock-FlexAid
        run_g_mmpbsa = []
        run_mmpbsa_queue = []

        import copy
        self.before_dask = copy.deepcopy(self.state_data)

        ################################################################################
        if self.g_mmpbsa_prepared is True:
            full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa']
            test = 1

            tpr_abs = full_g_mmpbsa_data['tprFile']
            tpr_file = open(tpr_abs, 'rb')
            tpr_mem = tpr_file.read()
            tpr_filename = tpr_abs.split(os.sep)[-1]

            mdp_abs = full_g_mmpbsa_data['mdpFile']
            mdp_file = open(mdp_abs, 'r')
            mdp_mem = mdp_file.read()
            mdp_filename = mdp_abs.split(os.sep)[-1]

            index_abs = full_g_mmpbsa_data['indexFile']
            index_file = open(index_abs, 'r')
            index_mem = index_file.read()
            index_filename = index_abs.split(os.sep)[-1]

            # data_pre.update({'dask': {}})
            data_pre = {}
            data_pre.update({'tprName': tpr_filename, 'tprMem': tpr_mem})
            data_pre.update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
            data_pre.update({'indexName': index_filename, 'indexMem': index_mem})

            self.dask_prep = data_pre

            for part_num in full_g_mmpbsa_data['parts']:
                # self.run_FlexAid_sim(FlexAid_sample_num, waitTime=waitTime)
                data = self.state_data['energySoftware']['g_mmpbsa']['simStates'][str(part_num)]

                save_run_name = "g_mmpbsa_part_{0}".format(part_num)
                data.update({'Program': 'g_mmpbsa', 'part_num': part_num})
                data.update({'save_run_name': save_run_name})
                data.update({'dask': {}})

                traj_abs = data['absFileSave']
                traj_file = open(traj_abs, 'rb')
                traj_mem = traj_file.read()
                traj_filename = data['fileSave']

                data['dask'].update({'tprName': tpr_filename, 'tprMem': tpr_mem})
                data['dask'].update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
                data['dask'].update({'indexName': index_filename, 'indexMem': index_mem})
                data['dask'].update({'trajMem': traj_mem, 'trajName': traj_filename})

                test = 1
                run_g_mmpbsa.append(data)

            self.run_mmpbsa_dask = run_g_mmpbsa

            # run_docking_queue = run_docking_LeDock + run_docking_FlexAid + run_docking_Vina
            final_queue_job = []
            # Need to select those that are not finished

            # filename = self.json_state_file
            # filename = self.absolute_json_state_file
            # filedata = self.state_data
            self.save_state_data_json(filedata=self.before_dask,
                                      filename=self.absolute_json_state_file)
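    # ------------------------------------------------------------------
    # Sketch of how such in-memory payloads can travel to workers with plain
    # dask.distributed primitives; `demo_ship_payload` and its worker-side
    # `unpack` helper are illustrative assumptions, not the molmolpy
    # run_dask_tools API. The payload keys match the ones built above.
    @staticmethod
    def demo_ship_payload(client, payload):
        def unpack(data, workdir='.'):
            # Recreate the shared input files on the worker's local disk.
            with open(os.path.join(workdir, data['tprName']), 'wb') as fh:
                fh.write(data['tprMem'])
            with open(os.path.join(workdir, data['mdpName']), 'w') as fh:
                fh.write(data['mdpMem'])
            return True

        big_future = client.scatter(payload)      # ship the bytes once
        task = client.submit(unpack, big_future)  # runs on some worker
        return task.result()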
    @hlp.timeit
    def check_dask_jobs(self, submitted_jobs_dask, finished_jobs, finished_jobs_dict):
        import copy

        for i, job in enumerate(submitted_jobs_dask):
            status = job.status
            if status == 'finished':
                test = 1
                # pop_item = modified_submitted_jobs_dask.pop(i)
                try:
                    if job.key in finished_jobs_dict:
                        continue

                    results = job.result()
                    test = 1

                    key = list(results.keys())[0]
                    prog = results[key]['Program']  # need [0] key
                    sample_num = results[key]['part_num']

                    if prog == 'g_mmpbsa':
                        sample_num = results[key]['part_num']
                        results_dask = results[key]['dask']

                        original_data = self.state_data['energySoftware'][prog]
                        abs_folder = self.g_mmpbsa_folder  # original_data['AbsFolder']

                        # Write every result artefact shipped back from the worker
                        for stem in ['out', 'energyMM', 'polar', 'apolar',
                                     'contribMM', 'contrib_pol', 'contrib_apol']:
                            out_name = abs_folder + os.sep + results_dask[stem + '_filename']
                            out_mem = results_dask[stem + '_mem']
                            out_file = open(out_name, 'w')
                            out_file.write(out_mem)
                            out_file.close()

                        # out_pdbqt_filename = out_pdbqt_name
                        # self.state_data['dockSoftware'][prog]['simStates'][str(sample_num)] = \
                        #     results[key]
                        update_results = copy.deepcopy(results)
                        update_results[key].pop('dask', None)
                        # self.state_data['dockSoftware'][prog]['simStates'][str(sample_num)] = results[key]
                        # self.state_data['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]
                        self.before_dask['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]

                    finished_jobs.append(job)
                    finished_jobs_dict.update({job.key: True})

                    self.save_state_data_json(filedata=self.before_dask,
                                              filename=self.absolute_json_state_file)
                except Exception as e:
                    print("error in runSim: ", e)
                    sys.exit(0)

        return finished_jobs, finished_jobs_dict

    # @hlp.timeit
    def run_dask_gmmpbsa(self, client=None, max_jobs_to_run=10):
        # from molmolpy.moldock import run_dask_tools
        from molmolpy.tools import run_dask_tools

        curr_client = client

        worker_status = run_dask_tools.get_dask_worker_status(curr_client)
        get_worker_free = run_dask_tools.check_free_resources(worker_status)

        queue_jobs = self.run_mmpbsa_dask
        job_quantity = len(queue_jobs)

        finished_jobs = []
        finished_jobs_dict = {}
        worker_status_free = None
        test = 1

        # maybe 2 async threads, one checks finished simulations, other submits jobs
        ###############################################################################################
        gmmbpsa_min_mem = 1000
        retries_num = 2
        max_jobus = max_jobs_to_run

        # TODO
        # This part runs the main program
        submitted_jobs = []
        submitted_jobs_dask = []

        curr_index = 0
        curr_worker = 0

        # prepare worker ids for easier switch
        worker_ids = {}
        for i, id in enumerate(get_worker_free):
            worker_ids.update({i: id})

        custom_index_curr = 3

        while len(queue_jobs) > 0:
            if curr_index == len(queue_jobs):
                curr_index = 0
            if curr_worker == len(worker_ids):
                curr_worker = 0

            print('-----------------------------------------------------------------')
            worker_status_temp = run_dask_tools.get_dask_worker_status(curr_client,
                                                                       custom_index=custom_index_curr)
            get_worker_free_temp = run_dask_tools.check_free_resources(worker_status_temp)

            print('----------------TEST------------------')
            curr_item = queue_jobs[curr_index]
            test = 1

            curr_worker_id = worker_ids[curr_worker]
            workstation_info_temp = get_worker_free_temp[curr_worker_id]
            workstation_preped_temp = workstation_info_temp['preped']
            workstation_address = workstation_preped_temp['workerAddress']
            # This way folder is buggy
            workstation_dir = workstation_preped_temp['workerDir']
            workstation_free_mem = workstation_preped_temp['freeMemory']

            jobs_running = len(submitted_jobs_dask) - len(finished_jobs)

            if workstation_free_mem > gmmbpsa_min_mem and jobs_running < max_jobus:
                print('Submit MMPBSA job to DASK')
                pop_item = queue_jobs.pop(curr_index)

                key_name = pop_item['save_run_name']
                run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)

                task_g_mmpbsa = curr_client.submit(run_dask_tools.run_gmmpbsa_using_dask,
                                                   pop_item,
                                                   workers=[workstation_address],
                                                   key=run_name,
                                                   retries=retries_num)
                submitted_jobs_dask.append(task_g_mmpbsa)

                if curr_index == 0:
                    curr_index = 0
                else:
                    curr_index -= 1

                pop_item.update({'workingDir': workstation_dir})
                submitted_jobs.append(pop_item)
                # MAYBE CHECK FOLDER HERE
            else:
                key_name = curr_item['save_run_name']
                run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)
                print('Passed running ', run_name)

            finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask,
                                                                     finished_jobs,
                                                                     finished_jobs_dict)
            test = 1

            ###################################################
            # update index
            # print(curr_item)
            curr_index += 1
            curr_worker += 1
            time.sleep(10)

        test = 1

        # print('Last Check of submitted jobs')
        while len(finished_jobs) != job_quantity:
            finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask,
                                                                     finished_jobs,
                                                                     finished_jobs_dict)
            time.sleep(10)

        return finished_jobs, finished_jobs_dict
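    # ------------------------------------------------------------------
    # The hand-rolled polling loop above can also be expressed with
    # dask.distributed's own as_completed iterator; a minimal sketch under
    # the assumption that `run_part` is any picklable function and
    # `payloads` a list of per-part dicts. Illustrative only.
    @staticmethod
    def demo_poll_futures(client, run_part, payloads):
        from dask.distributed import as_completed

        futures = [client.submit(run_part, p, retries=2) for p in payloads]
        results = []
        for fut in as_completed(futures):  # yields futures as they finish
            if fut.status == 'finished':
                results.append(fut.result())
        return results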
class GMMPBSAObject(object):
    """
    Molecule object loading of pdb and pbdqt file formats.
    Then converts to pandas dataframe.
    Create MoleculeObject by parsing pdb or pdbqt file.
    2 types of parsers can be used: 1. molmolpy 2. pybel.
    Stores molecule information in pandas dataframe as well as list.
    Read more in the :ref:`User Guide <MoleculeObject>`.

    Parameters
    ----------
    filename : str, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.

    Usage example

    >>> EPI_folder = '/media/Work/MEGA/Programming/StressHormones/dock_EPI'
    >>> EPI_samples = '/media/Work/MEGA/Programming/StressHormones/'
    >>>
    >>> receptor_file = EPI_folder + os.sep + 'centroid.pdb'
    >>> ligand_file = EPI_folder + os.sep + 'EPI.pdbqt'
    >>> molname = 'EPI'
    >>> receptor_name = 'LasR'
    >>>
    >>> LasR_MOR_mmpbsa_calc = g_mmpbsa_dask.GMMPBSAObject(traj, topol_file, tpr_file, mdp_file,
    ...                                                    index_file, first_index, second_index,
    ...                                                    molname, receptor_name)
    >>>
    >>> LasR_MOR_mmpbsa_calc.prepare_g_mmpbsa_dask_protocol(client)
    >>> LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True)
    >>> # LasR_MOR_mmpbsa_calc.run_dask_docking(client)

    The docking helpers kept in this class follow the uber-dock workflow:

    >>> receptor_file = EPI_folder + os.sep + 'centroid_model_clust2.pdbqt'
    >>> ligand_file = EPI_folder + os.sep + 'EPI.pdbqt'
    >>> run_type = 'vina_sample'
    >>> EPI_uber_dock = uber_docker.UberDockerObject(receptor_file, ligand_file, '.',
    ...                                              molname=molname, receptor_name=receptor_name)
    >>>
    >>> EPI_uber_dock.prepare_uber_dock_protocol()
    >>> EPI_uber_dock.run_uber_dock_protocol()

    Use together
    >>> self.prepare_uber_dock_protocol() for preparation
    >>> self.run_uber_dock_protocol()
    or separately
    >>> EPI_uber_dock.calculate_max_radius_from_com()
    >>> EPI_uber_dock.calculate_cube_edges()
    >>> EPI_uber_dock.calculate_box_edges_from_com()
    >>>
    >>> EPI_uber_dock.prepare_uber_docker()
    >>>
    >>> # This is for rDock
    >>> EPI_uber_dock.prepare_rdock_settings()
    >>> EPI_uber_dock.generate_rdock_cavity()
    >>> # Prepare and run Dock programs
    >>> EPI_uber_dock.prep_rDock_dock_run_commands()
    >>> EPI_uber_dock.run_rDock_simulation(parallel=True, waitTime=15)
    >>>
    >>> # This is for FlexAid
    >>> EPI_uber_dock.prepare_flexaid_settings()
    >>> EPI_uber_dock.process_flexaid_ligand()
    >>> EPI_uber_dock.get_flexaid_clefts()
    >>> EPI_uber_dock.run_FlexAid_simulation(parallel=True, waitTime=15)
    >>>
    >>> # This is for Autodock vina
    >>> EPI_uber_dock.set_up_Vina_Box()
    >>> EPI_uber_dock.prepare_Vina_run()
    >>> EPI_uber_dock.prepVinaSim_uberDock()
    >>> EPI_uber_dock.runVinaSim_uber()

    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.

    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n). Sparse neighborhoods can
    be precomputed using
    :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
    with ``mode='distance'``.
    """
    def __init__(self, traj, topol, tpr_file, mdp_file, index_file,
                 first_index, second_index,
                 molname, receptor_name,
                 folder_path='.', load_state_file=None):

        self.load_state_file = load_state_file

        # AutoDockTools scripts shipped with MGLTools
        self.mgltools_utilities = '/home/john1990/MGLTools-1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24'

        if load_state_file is not None:
            self.load_state_data_json(self.load_state_file)
        else:
            self.load_state_called = False
            print('G_MMPBSA Object has been created')

            self.trajectory_file = traj
            self.topology_file = topol
            self.tpr_file = tpr_file
            self.mdp_file = mdp_file
            self.index_file = index_file
            self.first_index = first_index
            self.second_index = second_index

            self.molecule_name = molname
            self.ligand_name = molname
            self.receptor_name = receptor_name
            self.run_type = 'g_mmpbsa'

            # whether it's for exhaustiveness or traditional run
            self.folder_path = folder_path
            self.command_run_list = []
            self.command_samples_run_list = []

            self.state_data = {}
            self.state_data_samples = {}

            self.g_mmpbsa_run_finished = False
            self.g_mmpbsa_sim_states = {'simStates': {}}
            self.g_mmpbsa_prepared = False
            self.objects_loaded = False

            # This part needs clarification
            self.prep_mdtraj_object()
            # original data before transformation
    def set_mgltools_path(self, path):
        print('MGLTools path is set to ', path)
        self.mgltools_utilities = path

    def set_flexaid_path(self, path):
        print('FlexAid path is set to ', path)
        self.flexaid_path = path

    def set_ledock_path(self, path):
        print('LeDock path is set to ', path)
        self.ledock_path = path

    def get_receptor_name(self):
        return self.receptor_name

    def set_molecule_name(self, mol_name):
        self.molecule_name = mol_name

    def set_receptor_name(self, receptor_name):
        self.receptor_name = receptor_name

    def prep_mdtraj_object(self):
        '''
        Prepare receptor mdtraj object

        get mdtraj topology and save as pandas dataframe

        Calculate pdb receptor center of mass
        :return:
        '''
        self.trajectory_mdtraj = md.load_xtc(self.trajectory_file, top=self.topology_file)
        self.trajectory_mdtraj_topology = self.trajectory_mdtraj.topology
        self.trajectory_mdtraj_topology_dataframe = self.trajectory_mdtraj.topology.to_dataframe()

        self.objects_loaded = True
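    # A minimal standalone sketch of what prep_mdtraj_object() does, including
    # the receptor centre of mass its docstring mentions. This helper is
    # illustrative only and not part of the original class; the file names are
    # placeholders:
    @staticmethod
    def _sketch_load_trajectory(xtc_path='traj.xtc', top_path='topology.pdb'):
        traj = md.load_xtc(xtc_path, top=top_path)
        table, bonds = traj.topology.to_dataframe()  # topology as a pandas dataframe
        com = md.compute_center_of_mass(traj)        # (n_frames, 3) array, in nm
        return traj, table, com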
    def get_uber_g_mmpbsa_run_folder_name(self):
        curr_folder = os.getcwd()
        return curr_folder + os.sep + self.run_folder_name

    def get_exhaust_run_folder_name(self):
        curr_folder = os.getcwd()
        print("Yippie yi kay", curr_folder)
        return curr_folder + os.sep + self.run_folder_name_samples

    def save_state_data_json(self, filedata=None, filename=None):
        '''
        :param filename: Saves state file
        :return:
        '''
        # import json
        # with open(filename, 'w') as outfile:
        #     json.dump(self.cluster_models, outfile)
        # pickle.dump(self.cluster_models, open(filename, "wb"))
        # TODO create folder for run saving state run
        # filename = self.sim_folder_run + os.sep + self.receptor_name + '_' + \
        #     self.molecule_name + '_' + self.run_type + '.json'
        if filedata is None:
            filedata = self.state_data
            # filename = self.json_state_file
            filename = self.absolute_json_state_file
        else:
            filedata = filedata
            filename = filename
        json.dump(filedata, open(filename, "w"), sort_keys=True, indent=4)
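    # A minimal sketch of the JSON round trip performed by save_state_data_json()
    # and load_state_data_json(); 'state.json' and the dict contents below are
    # placeholder assumptions, not molmolpy defaults:
    @staticmethod
    def _sketch_state_json_roundtrip(filename='state.json'):
        state = {'runType': 'g_mmpbsa', 'simStates': {}}
        with open(filename, 'w') as handle:
            json.dump(state, handle, sort_keys=True, indent=4)
        with open(filename, 'r') as handle:
            return json.load(handle)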
    # TODO should I add json saving of information or not?
    def load_state_data_json(self, filename):
        '''
        :param filename: load json state data
        :return:
        '''
        # self.absolute_path = os.path.abspath(filename)
        self.load_state_called = True
        print(os.path.abspath(__file__))
        self.state_data = json.load(open(filename, "r"))
        # os.chdir('HSL_exhaustiveness')

        self.trajectory_file = self.state_data['trajectoryFile']
        self.mdp_file = self.state_data['mdpFile']
        self.tpr_file = self.state_data['tprFile']
        self.index_file = self.state_data['indexFile']
        self.folder_path = self.state_data['folderPath']
        self.run_type = self.state_data['runType']
        self.molecule_name = self.state_data['molName']
        self.receptor_name = self.state_data['receptorName']

        # TODO test
        self.sim_folder_run = self.state_data['simRunFolder']  # .split('/')[-1]
        self.directories = self.state_data['directory']
        self.folder_exists = self.state_data['folderCreated']
        self.absolute_json_state_file = self.state_data['absoluteJsonStates']
        self.g_mmpbsa_folder = self.state_data['RunFolder']
        self.json_state_file = self.state_data['jsonStates']

        # self.rdock_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + 'rDock'
        # self.rdock_absolute_folder_name = self.uber_dock_folder + os.sep + self.rdock_folder_name
        # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
        # self.directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.rdock_folder_name)
        # print('TADA ', self.directories)

        self.mdtraj_frames = self.state_data['energySoftware']['g_mmpbsa']['frames']
        self.mdtraj_parts = self.state_data['energySoftware']['g_mmpbsa']['parts']
        self.file_save_list = self.state_data['energySoftware']['g_mmpbsa']['fileList']
        self.abs_file_save_list = self.state_data['energySoftware']['g_mmpbsa']['absFileList']
        self.simStates = self.state_data['energySoftware']['g_mmpbsa']['simStates']
        self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare']
    def load_samples_state_data_json(self, filename):
        '''
        :param filename: load json state data
        :return:
        '''
        # self.absolute_path = os.path.abspath(filename)
        self.load_state_called_samples = True
        print(os.path.abspath(__file__))
        self.state_data_samples = json.load(open(filename, "r"))
        # os.chdir('HSL_exhaustiveness')

        self.receptor_file = self.state_data_samples['receptorFile']
        self.ligand_file = self.state_data_samples['ligandFile']
        self.exhaustiveness = self.state_data_samples['exhaustivenessList']
        self.samples_run = self.state_data_samples['samplesList']
        self.folder_path = self.state_data_samples['folderPath']
        self.run_type_samples = self.state_data_samples['runType']
        self.molecule_name = self.state_data_samples['molName']
        self.receptor_name = self.state_data_samples['receptorName']

        # TODO test
        self.samples_exhaust = self.state_data_samples['samples_exhaust']
        self.sim_folder_run_samples = self.state_data_samples['simRunFolder']  # .split('/')[-1]
        self.directories_samples = self.state_data_samples['directory']
        self.setup_box = self.state_data_samples['setup']
        self.folder_exists = self.state_data_samples['folderCreated']

        self.x_center = self.state_data_samples['boxSettings']['x_center']
        self.y_center = self.state_data_samples['boxSettings']['y_center']
        self.z_center = self.state_data_samples['boxSettings']['z_center']
        self.x_size = self.state_data_samples['boxSettings']['x_size']
        self.y_size = self.state_data_samples['boxSettings']['y_size']
        self.z_size = self.state_data_samples['boxSettings']['z_size']
        self.num_modes = self.state_data_samples['boxSettings']['numModes']

    def hold_nSec(self, n):
        for i in range(1, n + 1):
            print(i)
            time.sleep(1)
    def prepare_g_mmpbsa(self):
        '''
        Prepare g_mmpbsa run folder and initial json configuration
        :return:
        '''
        self.run_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type
        self.sim_folder_run = self.folder_path + os.sep + self.run_folder_name

        # Create folder don't forget
        # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
        self.directories = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name)
        print('TADA ', self.directories)

        self.json_state_file = self.sim_folder_run + os.sep + \
            self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json'

        # This will hold information about run states
        self.g_mmpbsa_folder = self.get_uber_g_mmpbsa_run_folder_name()
        self.absolute_json_state_file = self.g_mmpbsa_folder + os.sep + \
            self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json'

        if len(self.directories) == 0:
            print('Creating folder for g_mmpbsa run\n')
            print(self.sim_folder_run)
            folder_utils.create_folder(self.sim_folder_run)
            self.folder_exists = True

            programs_dict = {'energySoftware': {'g_mmpbsa': {}}}

            self.state_data.update({'trajectoryFile': self.trajectory_file,
                                    'mdpFile': self.mdp_file,
                                    'tprFile': self.tpr_file,
                                    'indexFile': self.index_file,
                                    'runFolderName': self.run_folder_name,
                                    'folderPath': self.folder_path,
                                    'jsonStates': self.json_state_file,
                                    'runType': self.run_type,
                                    'molName': self.molecule_name,
                                    'receptorName': self.receptor_name,
                                    'simRunFolder': self.sim_folder_run,
                                    'RunFolder': self.g_mmpbsa_folder,
                                    'absoluteJsonStates': self.absolute_json_state_file,
                                    'directory': self.directories,
                                    'folderCreated': self.folder_exists,
                                    'simStates': {}})
            self.state_data.update(programs_dict)
            # self.prepVinaSim_exhaust()
            self.save_state_data_json()
            self.load_state_called = False
        else:
            self.load_state_file = self.json_state_file
            self.load_state_called = True
            self.load_state_data_json(self.load_state_file)
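    # A sketch of the state-file layout that prepare_g_mmpbsa() writes,
    # assembled from the update() calls above; every value below is a
    # placeholder, not a molmolpy default:
    @staticmethod
    def _sketch_state_layout():
        return {
            'trajectoryFile': 'traj.xtc',
            'mdpFile': 'md.mdp',
            'tprFile': 'md.tpr',
            'indexFile': 'index.ndx',
            'runFolderName': 'LasR_EPI_g_mmpbsa',
            'folderPath': '.',
            'jsonStates': './LasR_EPI_g_mmpbsa/LasR_EPI_g_mmpbsa.json',
            'runType': 'g_mmpbsa',
            'molName': 'EPI',
            'receptorName': 'LasR',
            'simRunFolder': './LasR_EPI_g_mmpbsa',
            'RunFolder': '/abs/path/LasR_EPI_g_mmpbsa',
            'absoluteJsonStates': '/abs/path/LasR_EPI_g_mmpbsa/LasR_EPI_g_mmpbsa.json',
            'directory': [],
            'folderCreated': True,
            'simStates': {},
            'energySoftware': {'g_mmpbsa': {}},
        }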
    def prepare_g_mmpbsa_dask_protocol(self, dask_client=None, prep_g_mmpbsa=True):
        '''
        Prepare the dask protocol for g_mmpbsa: split the trajectory into parts
        and register the output files every part has to produce.
        :return:
        '''
        if self.g_mmpbsa_prepared is True:
            print('Do not prep files')
            return 'Do not prep files'

        traj_len = len(self.trajectory_mdtraj)
        curr_client = dask_client

        # Testing Phase
        total_free_cores = 16

        # Production
        # worker_status = run_dask_tools.get_dask_worker_status(curr_client)
        # get_worker_free = run_dask_tools.check_free_resources(worker_status)
        # total_free_cores = 0
        # for worker in get_worker_free:
        #     preped = get_worker_free[worker]['preped']
        #     total_free_cores += preped['freeCores']

        # select_indexes = list(range(total_free_cores))  # Maximum parallel
        div_traj = math.trunc(traj_len / total_free_cores)
        select_frames = list(range(0, traj_len, div_traj))
        select_indexes = list(range(len(select_frames)))

        folder_to_save = self.g_mmpbsa_folder
        temp_mdtraj = []
        temp_mdtraj_indexes = []
        file_save_list = []
        abs_file_save_list = []
        temp_state = {}
        simStates = {'simStates': {}}

        for i in select_indexes:
            temp_state.update({str(i): {}})

            # cut the trajectory slice for this part and save it as its own xtc
            frame_start = select_frames[i]
            part_traj = self.trajectory_mdtraj[frame_start:frame_start + div_traj]
            temp_mdtraj.append(part_traj)
            temp_mdtraj_indexes.append(i)

            file_save = 'traj_part{0}.xtc'.format(i)
            abs_file_save = folder_to_save + os.sep + file_save
            part_traj.save_xtc(abs_file_save)
            file_save_list.append(file_save)
            abs_file_save_list.append(abs_file_save)

            temp_state[str(i)].update({'runFinished': False,
                                       'index': i,
                                       'absFolder': folder_to_save,
                                       'fileSave': file_save,
                                       'absFileSave': abs_file_save,
                                       'firstIndex': self.first_index,
                                       'secondIndex': self.second_index,
                                       'indexFile': self.index_file,
                                       'mdpFile': self.mdp_file,
                                       'tprFile': self.tpr_file})

            energy_mm = 'energy_MM_{0}.xvg'.format(i)
            polar = 'polar_{0}.xvg'.format(i)
            apolar = 'apolar_{0}.xvg'.format(i)
            contrib_mm = 'contrib_MM_{0}.dat'.format(i)
            contrib_pol = 'contrib_pol_{0}.dat'.format(i)
            contrib_apol = 'contrib_apol_{0}.dat'.format(i)
            temp_state[str(i)].update({'energyMM': energy_mm,
                                       'polar': polar,
                                       'apolar': apolar,
                                       'contrib_MM': contrib_mm,
                                       'contrib_pol': contrib_pol,
                                       'contrib_apol': contrib_apol})

        simStates['simStates'].update(temp_state)

        self.mdtraj_frames = select_frames
        self.mdtraj_sliced = temp_mdtraj
        self.mdtraj_parts = temp_mdtraj_indexes
        self.file_save_list = file_save_list
        self.abs_file_save_list = abs_file_save_list
        self.simStates = simStates
        self.g_mmpbsa_prepared = True

        self.state_data['energySoftware']['g_mmpbsa'].update({'frames': self.mdtraj_frames})
        self.state_data['energySoftware']['g_mmpbsa'].update({'prepare': self.g_mmpbsa_prepared})
        self.state_data['energySoftware']['g_mmpbsa'].update({'parts': self.mdtraj_parts})
        self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list})
        self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list})
        self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates)
        self.state_data['energySoftware']['g_mmpbsa'].update({'firstIndex': self.first_index})
        self.state_data['energySoftware']['g_mmpbsa'].update({'secondIndex': self.second_index})
        self.save_state_data_json()
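    # A standalone sketch of the chunking idea used by
    # prepare_g_mmpbsa_dask_protocol(): cut an mdtraj trajectory into roughly
    # equal slices and write each slice as its own xtc part file. This helper
    # is illustrative only; `traj` is assumed to be an already-loaded
    # mdtraj.Trajectory:
    @staticmethod
    def _sketch_split_trajectory(traj, n_parts, out_dir='.'):
        chunk = max(1, math.trunc(len(traj) / n_parts))
        paths = []
        for i, start in enumerate(range(0, len(traj), chunk)):
            path = os.path.join(out_dir, 'traj_part{0}.xtc'.format(i))
            traj[start:start + chunk].save_xtc(path)  # slicing returns a sub-trajectory
            paths.append(path)
        return paths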
    def prepare_for_dask_cluster(self, LeDock=2, rDock=2, FlexAid=2, Vina=2, parallel=False):
        '''
        Prepare the g_mmpbsa job queue that will be submitted to the dask cluster.
        :return:
        '''
        current_pid = multiprocessing.current_process().pid
        print("Main Process with PID:{}".format(current_pid))

        # TODO article: Pagadala, "Software for molecular docking: a review"
        # free_threads_for_Vina = num_threads - LeDock - rDock - FlexAid
        run_g_mmpbsa = []
        run_mmpbsa_queue = []

        # Prepare outputs
        if self.g_mmpbsa_prepared is True:
            full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa']

            tpr_abs = full_g_mmpbsa_data['tprFile']
            tpr_file = open(tpr_abs, 'rb')
            tpr_mem = tpr_file.read()
            tpr_file.close()
            tpr_filename = tpr_abs.split(os.sep)[-1]

            mdp_abs = full_g_mmpbsa_data['mdpFile']
            mdp_file = open(mdp_abs, 'r')
            mdp_mem = mdp_file.read()
            mdp_file.close()
            mdp_filename = mdp_abs.split(os.sep)[-1]

            index_abs = full_g_mmpbsa_data['indexFile']
            index_file = open(index_abs, 'r')
            index_mem = index_file.read()
            index_file.close()
            index_filename = index_abs.split(os.sep)[-1]

            for part_num in full_g_mmpbsa_data['parts']:
                data = self.state_data['energySoftware']['g_mmpbsa']['simStates'][str(part_num)]
                save_run_name = "g_mmpbsa_part_{0}".format(part_num)
                data.update({'Program': 'g_mmpbsa'})
                data.update({'part_num': part_num})
                data.update({'save_run_name': save_run_name})
                data.update({'dask': {}})

                traj_abs = data['absFileSave']
                traj_file = open(traj_abs, 'rb')
                traj_mem = traj_file.read()
                traj_file.close()
                traj_filename = data['fileSave']

                data['dask'].update({'trajName': traj_filename, 'trajMem': traj_mem})
                data['dask'].update({'tprName': tpr_filename, 'tprMem': tpr_mem})
                data['dask'].update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
                data['dask'].update({'indexName': index_filename, 'indexMem': index_mem})

                run_g_mmpbsa.append(data)

            run_mmpbsa_queue = run_g_mmpbsa
            # run_docking_queue = run_docking_LeDock + run_docking_FlexAid + run_docking_Vina
            # curr_LeDock = 0  # very slow
            # while len(run_docking_queue) != 40:
            #     run_docking_queue += run_docking_LeDock[curr_LeDock:curr_LeDock + LeDock]
            #     curr_LeDock += LeDock
            #     run_docking_queue += run_docking_FlexAid[curr_FlexAid:curr_FlexAid + FlexAid]
            #     curr_FlexAid += FlexAid
            #     run_docking_queue += run_docking_Vina[curr_Vina:curr_Vina + Vina]
            #     curr_Vina += Vina
            final_queue_job = []

            # Need to select those that are not finished
            for pre_job in run_mmpbsa_queue:
                # print(pre_job)
                if pre_job['runFinished'] is False:
                    final_queue_job.append(pre_job)

            # input data shared by all workers, scattered before jobs are submitted
            data_pre = {}
            data_pre.update({'tprName': tpr_filename, 'tprMem': tpr_mem})
            data_pre.update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
            data_pre.update({'indexName': index_filename, 'indexMem': index_mem})
            self.dask_prep = data_pre

            self.run_mmpbsa_dask = final_queue_job
            # random.shuffle(self.run_docking_queue)
            print('Finished preparing g_mmpbsa jobs')
    # @hlp.timeit
    def run_dask_gmmpbsa(self, client=None, max_jobs_to_run=10):
        # from molmolpy.moldock import run_dask_tools
        from molmolpy.tools import run_dask_tools

        curr_client = client

        worker_status = run_dask_tools.get_dask_worker_status(curr_client)
        get_worker_free = run_dask_tools.check_free_resources(worker_status)
        original_get_worker_free = copy.deepcopy(get_worker_free)

        # snapshot of the state before dask jobs start updating it
        self.before_dask = copy.deepcopy(self.state_data)

        # TEST IT WORKS
        queue_jobs = self.run_mmpbsa_dask
        # job_test = queue_jobs[0]
        # result = run_dask_tools.run_gmmpbsa_using_dask(job_test)
        job_quantity = len(queue_jobs)

        # Upload the shared input files to all clients first.
        # Scatter works a lot better than upload_file for big files:
        # big_future = self.dask_prep
        # run_dask_tools.upload_g_mmpbsa_files_dask(big_future)
        # tasks_upload = []
        # big_future = client.scatter(self.dask_prep, broadcast=True)
        # for worker_address in worker_ids.values():
        #     task = client.submit(run_dask_tools.upload_g_mmpbsa_files_dask,
        #                          big_future,
        #                          workers=[worker_address],
        #                          key='key_scatter_{0}'.format(worker_address),
        #                          retries=retries_num)
        #     tasks_upload.append(task)
        # print("Starting uploading of files")

        # maybe 2 async threads: one checks finished simulations, the other submits jobs
        gmmbpsa_min_mem = 1000
        retries_num = 2

        submitted_jobs = []
        submitted_jobs_dask = []
        finished_jobs = []
        finished_jobs_dict = {}

        curr_index = 0
        curr_worker = 0

        # prepare worker ids for easier switch
        worker_ids = {}
        for i, id in enumerate(get_worker_free):
            worker_ids.update({i: id})

        custom_index_curr = 3
        while len(queue_jobs) > 0:
            if curr_index == len(queue_jobs):
                curr_index = 0
            if curr_worker == len(worker_ids):
                curr_worker = 0
            print('-----------------------------------------------------------------')

            worker_status_temp = run_dask_tools.get_dask_worker_status(curr_client,
                                                                       custom_index=custom_index_curr)
            get_worker_free_temp = run_dask_tools.check_free_resources(worker_status_temp)
            custom_index_curr += 1

            print('----------------TEST------------------')
            curr_item = queue_jobs[curr_index]
            curr_worker_id = worker_ids[curr_worker]

            workstation_info_temp = get_worker_free_temp[curr_worker_id]
            workstation_preped_temp = workstation_info_temp['preped']
            workstation_address = workstation_preped_temp['workerAddress']
            # This way folder is buggy
            workstation_dir = original_get_worker_free[curr_worker_id]['preped']['workerDir']
            workstation_freemem = workstation_preped_temp['freeMemory']
            workstation_freecpu = workstation_preped_temp['freeCores']

            curr_item_prog = curr_item['Program']

            ############################################################
            # keep no more than max_jobs_to_run jobs on the cluster
            jobs_running = len(submitted_jobs_dask) - len(finished_jobs)
            max_jobus = max_jobs_to_run

            if jobs_running < max_jobus:
                print('Submit MMPBSA job to DASK')
                pop_item = queue_jobs.pop(curr_index)
                key_name = pop_item['save_run_name']
                run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)
                print('Cur run ', run_name)
                if curr_index == 0:
                    curr_index = 0
                else:
                    curr_index -= 1
                pop_item.update({'workingDir': workstation_dir})
                submitted_jobs.append(pop_item)
                # MAYBE CHECK FOLDER HERE
                # big_future = client.scatter(pop_item, workers=[workstation_address], hash=False)
                big_future = pop_item
                task_g_mmpbsa = client.submit(run_dask_tools.run_gmmpbsa_using_dask,
                                              big_future,
                                              workers=[workstation_address],
                                              key=run_name,
                                              retries=retries_num)
                submitted_jobs_dask.append(task_g_mmpbsa)
            else:
                key_name = curr_item['save_run_name']
                run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)
                print('Passed running ', run_name)
                # submitted_jobs_dask_temp, finished_jobs_temp = self.check_dask_jobs(submitted_jobs_dask, finished_jobs)
                finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask,
                                                                         finished_jobs,
                                                                         finished_jobs_dict)
                if curr_index == 0 and len(submitted_jobs_dask) == 1:
                    curr_index = 0
                else:
                    curr_index += 1

            curr_worker += 1
            time.sleep(10)

        # Last check of submitted jobs
        while len(finished_jobs) != job_quantity:
            finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask,
                                                                     finished_jobs,
                                                                     finished_jobs_dict)
            time.sleep(10)

        print('->' * 10)
        print('Everything is finished :))))))')
        print('---' * 10)
        print('\n')
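    # A minimal sketch of the submit-then-collect pattern used by
    # run_dask_gmmpbsa(), written against the public dask.distributed API only;
    # `square` stands in for run_dask_tools.run_gmmpbsa_using_dask and the
    # scheduler address is a placeholder:
    @staticmethod
    def _sketch_dask_submit(scheduler_address='tcp://127.0.0.1:8786'):
        from dask.distributed import Client

        def square(job):
            return {job['name']: job['value'] ** 2}

        client = Client(scheduler_address)
        queue = [{'name': 'job_{0}'.format(i), 'value': i} for i in range(4)]
        futures = [client.submit(square, job, key=job['name'], retries=2)
                   for job in queue]
        results = [future.result() for future in futures]  # .result() blocks
        client.close()
        return results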
    @hlp.timeit
    def check_dask_jobs(self, submitted_jobs_dask, finished_jobs, finished_jobs_dict):
        '''
        Check the status of submitted dask jobs and write finished results to disk.
        '''
        for i, job in enumerate(submitted_jobs_dask):
            status = job.status
            if status == 'finished':
                # skip jobs whose results were already collected
                try:
                    if finished_jobs_dict[i] is True:
                        continue
                except Exception as error:
                    pass
                finished_jobs.append(job)
                finished_jobs_dict.update({i: True})
                try:
                    results = job.result()
                    key = list(results.keys())[0]
                    prog = results[key]['Program']
                    sample_num = results[key]['part_num']

                    results_dask = results[key]['dask']
                    original_data = self.state_data['energySoftware'][prog]
                    abs_folder = self.g_mmpbsa_folder  # original_data['AbsFolder']

                    # write every buffer (g_mmpbsa output, energies and per-residue
                    # contributions) that came back through dask into the run folder
                    for prefix in ('out', 'energyMM', 'polar', 'apolar',
                                   'contribMM', 'contrib_pol', 'contrib_apol'):
                        out_name = abs_folder + os.sep + results_dask['{0}_filename'.format(prefix)]
                        out_mem = results_dask['{0}_mem'.format(prefix)]
                        out_file = open(out_name, 'w')
                        out_file.write(out_mem)
                        out_file.close()

                    update_results = copy.deepcopy(results)
                    update_results[key].pop('dask', None)
                    # self.state_data['dockSoftware'][prog]['simStates'][str(sample_num)] = results[key]
                    self.state_data['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]
                    self.before_dask['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]

                    # filename = self.json_state_file
                    # filedata = self.state_data
                    self.save_state_data_json(filedata=self.before_dask,
                                              filename=self.absolute_json_state_file)

                    # allow CPU to cool down
                    # self.hold_nSec(5)
                    print('This success ')
                except Exception as error:
                    print('error is ', error)
                    # print('i is ', i)
        print('Finished checking dask submissions ---\n')
        print('---' * 10)
        return finished_jobs, finished_jobs_dict
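    # A small sketch of the non-blocking status check check_dask_jobs() relies
    # on: a dask future exposes .status ('pending', 'finished', 'error', ...)
    # without blocking, while .result() blocks. `futures` is assumed to be a
    # list of dask.distributed futures:
    @staticmethod
    def _sketch_poll_futures(futures):
        finished = [f for f in futures if f.status == 'finished']
        pending = [f for f in futures if f.status != 'finished']
        return [f.result() for f in finished], pending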
"self.state_data['energySoftware']['g_mmpbsa']['absFileList'] self.simStates = self.state_data['energySoftware']['g_mmpbsa']['simStates'] test = 1 self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare'] # self.state_data['energySoftware']['g_mmpbsa'].update({'frames': self.mdtraj_frames})", "# # test = 1 # print('Last Check of submitted jobs') while len(finished_jobs)", "# * Neither the name of the molmolpy Developers nor the names of", "FlexAid # # run_docking_queue += run_docking_Vina[curr_Vina:curr_Vina + Vina] # curr_Vina += Vina #", "', i) print('Finished checking dask submissions ---\\n') print('---' * 10) return finished_jobs, finished_jobs_dict", "ADAPTVGA is 0 MUTARATE 0.10 # Crossover operator # Intragenic crossovers are possible", "{9} \" \\ \"--seed 10 \" \\ \"--log {10}.txt \" \\ \"--out {11}_out.pdbqt\".format(self.receptor_file,", "self.molecule_name + '_' + self.run_type self.sim_folder_run = self.folder_path + os.sep + self.run_folder_name #", "+ self.run_type_samples + '.json' # This will hold information about run states if", "command_receptor = self.ledock_path + os.sep + 'lepro_linux_x86' + ' {0} '.format(self.receptor_ledock_pdb) os.system(command_receptor) self.lepro_pdb_file", "ultraDock folder and initial json configuration >>> EPI_uber_dock.prepare_rdock_settings() Convert with pybel to mol2", "or pdbqt file. 2 types of parsers can be used: 1.molmolpy 2. pybel", "+ '_' + self.run_type + '.json' if len(self.directories) == 0: print('Creating folder for", "0: print('Creating folder for vina samples run\\n') print('Vina run type: {0}'.format(self.run_type_samples)) print(self.sim_folder_run_samples) folder_utils.create_folder(self.sim_folder_run_samples)", "Molecule object loading of pdb and pbdqt file formats. 
    def prepare_ledock_settings(self):
        '''
        Prepare ultraDock folder and initial json configuration

        >>> EPI_uber_dock.prepare_ledock_settings()

        Prepares the receptor structure for docking using LePro.
        :return:
        '''
        self.ledock_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + 'LeDock'
        self.ledock_absolute_folder_name = self.uber_dock_folder + os.sep + self.ledock_folder_name

        self.ledock_directories = folder_utils.find_folder_in_path(self.uber_dock_folder,
                                                                   self.ledock_folder_name)
        print('TADA ', self.ledock_directories)

        # This will hold information about run states
        if len(self.ledock_directories) == 0:
            folder_utils.create_folder(self.ledock_absolute_folder_name)
            self.ledock_folder_exists = True

            # Prepare the receptor structure for docking using lepro
            # ./lepro_linux_x86 LasR_flexaid.pdb
            os.chdir(self.ledock_absolute_folder_name)
            command_receptor = self.ledock_path + os.sep + 'lepro_linux_x86' + \
                ' {0} '.format(self.receptor_ledock_pdb)
            os.system(command_receptor)
            self.lepro_pdb_file = 'pro.pdb'
            # Need to check whether lepro ran fine
            print('Updated receptor with LePro\n')
            os.chdir(self.uber_dock_folder)

            self.state_data['dockSoftware']['LeDock'].update(
                {'LeDockFolderExists': self.ledock_folder_exists,
                 'LeDockAbsFolder': self.ledock_absolute_folder_name,
                 'LeDockFolderName': self.ledock_folder_name})
            self.save_state_data_json()
            self.load_state_called = False

            self.ledock_title = self.receptor_name + '_' + self.ligand_name + '_LeDock Parameter file'
            self.ledock_rmsd = 0.5
            self.set_up_ledock_dock_blind_parameters(title=self.ledock_title,
                                                     receptor_file=self.lepro_pdb_file,
                                                     ledock_rmsd=self.ledock_rmsd,
                                                     x_center=self.x_center,
                                                     y_center=self.y_center,
                                                     z_center=self.z_center)
        else:
            print('state has been loaded \n')
            # try:
            #     self.setup_ledock_pameters = self.ledock_data['setup_LeDock']
            #     self.ledock_num_samples = self.ledock_data['num_samples']
            #     self.ledock_input_info = self.ledock_data['LeDockInputInfo']
            #     self.param_ledock_template = self.ledock_data['paramFull']
            # except:
            #     print('LeDock setting part is empty verify yolo')
            # try:
            #     self.ledock_param_title = self.ledock_data['LeDock_params']['title']
            #     self.receptor_file_ledock = self.ledock_data['LeDock_params']['receptorFile']
            #     self.ledock_rmsd = self.ledock_data['LeDock_params']['LeDockRMSD']
            #     self.ledock_xmin = self.ledock_data['LeDock_params']['xmin']
            #     self.ledock_xmax = self.ledock_data['LeDock_params']['xmax']
            #     self.ledock_ymin = self.ledock_data['LeDock_params']['ymin']
            #     self.ledock_ymax = self.ledock_data['LeDock_params']['ymax']
            # except:
            #     print('LeDock_params is empty verify yolo')

    def prep_LeDock_dock_run_commands(self, num_samples=10):
        '''
        Prepare LeDock run commands and save to json
        :param num_samples: test value 6
        :return:
        '''
        try:
            self.g_mmpbsa_sim_states = self.state_data['dockSoftware']['LeDock']['simStates']
            self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list']
            print('No need to generate LeDock commands')
        except:
            self.state_data['dockSoftware']['LeDock'].update(
                {'LeDockSample_list': list(range(1, num_samples + 1))})
            self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list']
            for sample_num in self.ledock_samples:
                self.prep_LeDock_dock_command(sample_num)
            print('Now continue for LeDock:D')
            self.save_state_data_json()
            self.prep_LeDock_run = True

    def prep_LeDock_dock_command(self, sample_num, pose_gen=20):
        '''
        prepare each separate LeDock run command
        :param sample_num:
        :param pose_gen: number of poses
        :return:
        '''
        try:
            if self.setup_ledock_pameters is not False:
                # TODO need to think about seed
                # ./ledock_linux_x86 dock.in
                command_receptor = self.ledock_path + os.sep + 'ledock_linux_x86'
                sample_data = self.ledock_input_info[str(sample_num)]
                parm_name = sample_data['ledock_parm_name']

                self.save_run_name = "ledock_{0}_sample_{1}".format(self.run_type, sample_num)
                command_to_run = "{0} {1}".format(command_receptor, parm_name)
                ligand_clear_dok = sample_data['ligand_clear_name'] + '.dok'
                # -spli MOR_flexaid.dok
                command_to_clean = "{0} -spli {1}".format(command_receptor, ligand_clear_dok)
                print(command_to_run)
                self.LeDock_command_run_list.append(command_to_run)
                print("Launching new Sim")
                temp_dict = {str(sample_num): {'save_run_name': self.save_run_name,
                                               'commandRun': command_to_run,
                                               'commandToClean': command_to_clean,
                                               'dokFileName': ligand_clear_dok,
                                               'runFinished': False}}
                self.state_data['dockSoftware']['LeDock']['simStates'].update(temp_dict)
                print("LeDock command generation finished")
            else:
                print('Please setup LeDock settings')
        except Exception as e:
            print("error in runSim: ", e)
            sys.exit(0)
    ####################################################################################################################
    def flexaid_generate_ga_dat_parameters_dask(self):
        '''
        Generate GA dat parameters for flexaid docking
        :return:
        '''
        self.flexaid_ga_dat_param_template = '''# Number of chromosomes (number individuals in the population)
# Integer in interval [1-N]
NUMCHROM 500
# Adaptive crossover and mutation probabilities
# Floats in interval [0.0,1.0]
ADAPTKCO 0.95 0.10 0.95 0.10
# Constant mutation probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
MUTARATE 0.10
# Crossover operator
# Intragenic crossovers are possible
INTRAGEN
# Specifies that the initial population is generated randomly
POPINIMT RANDOM
# Fitness function
# Value in [LINEAR,PSHARE]
FITMODEL PSHARE
# Parameters of the sharing function
# Floats in interval [0.0,1000.0]
SHAREALF 4.0
SHAREPEK 5.0
SHARESCL 10.0
# Reproduction model
# Values in [BOOM,STEADY]
REPMODEL BOOM
# Fraction of population to create
# Only considered when REPMODEL is BOOM
BOOMFRAC 1.0
# Number of new individuals to generate at each generation
# Only considered when REPMODEL is STEADY
# Integer in interval [1,N-1] where N is NUMCHROM
STEADNUM 950
# Number of TOP individuals to print in console
# Integer in interval [1,N] where N is NUMCHROM
PRINTCHR 10
'''
        self.generate_ga_dat_pameters = True
        generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat'
        generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep + generate_ga_dat
        return [generate_ga_dat, ]
        # self.generate_ga_dat_object_file = open(self.generate_ga_dat_name_abs, 'w')
        # self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template)
        # self.generate_ga_dat_object_file.close()
        #
        # self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}})
        # self.state_data['dockSoftware']['FlexAid']['GA_params'].update(
        #     {'generateGA_param': self.generate_ga_dat_pameters,
        #      'GA_DataName': self.generate_ga_dat,
        #      'GA_DATA_Abs': self.generate_ga_dat_name_abs,
        #      'GA_ParamFull': self.flexaid_ga_dat_param_template})
        # self.state_data_samples = self.state_data.copy()
        # self.save_state_data_json()
        # TODO this part needs to be thought out

    ####################################################################################################################
    def flexaid_generate_config_input_dask(self):
        '''
        Generate the CONFIG input file for flexaid docking.
        FlexAid is very strict about spaces
        :return:
        '''
        flexaid_config_input_template = '''# Optimization method (genetic-algorithms)
METOPT GA
# The variation in degrees for the anchor angle of the ligand
# Float in interval [1.0-30.0]
VARANG 5.0
# The variation in degrees for the flexible dihedrals of the ligand
# Float in interval [1.0-30.0]
VARFLX 10.0
# Use Vcontacts in the calculations of surfaces in contact
COMPLF VCT
# Solvent penalty from the interaction matrix
# Float in interval [-200.0,200.0]
SLVPEN 0.0
# Use Vcontacts indexing
VINDEX
# Vcontacts plane definition
# Value in [B,R,X] for Bissecting, Radical and Extended radical plane
# See McConkey et al. (2002) Bioinformatics. 18(10); 1365-1373
VCTPLA R
# Use normalized surfaces in contacts
NORMAR
# Define the RMSD cutoff between clusters
# Float in interval [0.5,3.0]
CLRMSD 2.0
# Number of results (docking poses) to output
MAXRES 20
# Permeability allowed between atoms
# Float in interval [0.0,1.0] from fully permeable to no permeability
PERMEA 0.9
# Permeability for side-chain rotamer acceptance
# Float in interval [0.0-1.0]
DEECLA 0.8
# Include water molecules in the target (always removed by default)
# Only considered if EXCHET is disabled
# To include water molecules, uncomment the next line
#INCHOH
# Only output scored atoms in the final results
# Use instances of flexible side-chains
#SCOLIG
# Ends reading of CONFIG file
ENDINP
'''
        final_str = ''''''

        # Specify the processed target file to use
        pdbnam = 'PDBNAM ' + '{0}.inp.pdb\n\n'.format(self.receptor_name)

        # BTN.inp has the unique RESNUMC identifier LIG9999A
        inplig = 'INPLIG ' + '{0}.inp\n\n'.format(self.ligand_flexaid_initials)

        # Specify to use one or multiple cleft(s) as binding-site
        rgnopt_locclf = 'RNGOPT LOCCLF ' + 'global_binding_site.pdb\n\n'

        # Specify the degrees of freedom (DOF) of the processed ligand with
        # residue number 9999 and chain A
        # Translational DOF of the ligand (-1)
        optimz1 = 'OPTIMZ 9999 {0} -1\n\n'.format(self.flexaid_res_chain)
        # Rotational DOF of the ligand (0)
        optimz2 = 'OPTIMZ 9999 {0} 0\n\n'.format(self.flexaid_res_chain)

        # Add one extra line for each flexible bond defined in the processed
        # ligand .inp file, e.g. for a ligand with 5 flexible bonds
        flexible_bonds_data = open(self.flexaid_absolute_processed_files_folder + os.sep +
                                   '{0}.inp'.format(self.ligand_flexaid_initials), 'r')
        flexible_bonds_data_text = flexible_bonds_data.read()
        flexible_bonds_data.close()

        flexible_index_list = []
        flexible_index_list_phrases = []
        for i in flexible_bonds_data_text.split('\n'):
            if 'FLEDIH' in i:  # flexible dihedral definitions of the .inp file
                temp = i.split(' ')
                print(temp)
                flex_index = temp[-2]
                flexible_index_list.append(int(flex_index))
                temp_line = 'OPTIMZ {0} {1} {2}\n'.format(self.flexaid_res_number,
                                                          self.flexaid_res_chain,
                                                          flex_index)
                flexible_index_list_phrases.append(temp_line)

        final_str += pdbnam
        final_str += inplig
        final_str += rgnopt_locclf
        final_str += optimz1
        final_str += optimz2
        for y in flexible_index_list_phrases:
            final_str += y
        final_str += '\n'

        rmsdst = 'RMSDST ' + '{0}_ref.pdb\n\n'.format(self.ligand_flexaid_initials)
        final_str += rmsdst
        final_str += flexaid_config_input_template

        generate_config_input_file = 'CONFIG_' + self.receptor_name + '-' + self.ligand_name + '.inp'
        return generate_config_input_file, final_str
run", "self.check_dask_jobs(submitted_jobs_dask,finished_jobs) finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask, finished_jobs, finished_jobs_dict) test = 1 ###################################################3 # update", "select those that are not finished for pre_job in run_mmpbsa_queue: # print(pre_job) if", "test = 1 # # total_free_cores = 0 # # for worker in", "0 # very slow # while len(run_docking_queue) != 40: # run_docking_queue += run_docking_LeDock[curr_LeDock:curr_LeDock", "self.ledock_xmin = self.ledock_data['LeDock_params']['xmin'] # self.ledock_xmax = self.ledock_data['LeDock_params']['xmax'] # self.ledock_ymin = self.ledock_data['LeDock_params']['ymin'] # self.ledock_ymax", "= False self.ledock_title = self.receptor_name + '_' + self.ligand_name + '_LeDock Parameter file'", "considered when ADAPTVGA is 0 MUTARATE 0.10 # Crossover operator # Intragenic crossovers", "to ', path) self.flexaid_path = path def set_ledock_path(self, path): print('LeDock path is set", ":param filename: load json state data :return: ''' # self.absolute_path = os.path.abspath(filename) self.load_state_called", "json :param num_samples: test value 6 :return: ''' try: self.g_mmpbsa_sim_states = self.state_data['dockSoftware']['LeDock']['simStates'] self.ledock_samples", "must reproduce the above # copyright notice, this list of conditions and the", "maybe 2 async threads, one checks finished simulations, other submits jobs ############################################################################################### gmmbpsa_min_mem", "self.mdp_file = self.state_data['mdpFile'] self.tpr_file = self.state_data['tprFile'] self.index_file = self.state_data['indexFile'] self.folder_path = self.state_data['folderPath'] self.run_type", "indexing VINDEX # Vcontacts plane definition # Value in [B,R,X] for Bissecting, Radical", "import mdtraj as md from molmolpy.utils.cluster_quality import * from molmolpy.utils import folder_utils import", "sample_data = self.ledock_input_info[str(sample_num)] parm_name = sample_data['ledock_parm_name'] test = 1 self.save_run_name = \"ledock_{0}_sample_{1}\".format(self.run_type, sample_num)", "= '/home/john1990/MGLTools-1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24' class GMMPBSAObject(object): \"\"\" Usage example >>> EPI_folder = '/media/Work/MEGA/Programming/StressHormones/dock_EPI' >>> EPI_samples", "1 # data['dask'].update({'cavFile':cav_file_mem }) # self.state_data['dockSoftware']['LeDock']['simStates'][str(LeDock_sample_num)] = data test = 1 run_g_mmpbsa.append(data) #", "run # filename = self.sim_folder_run + os.sep + self.receptor_name + '_' + self.molecule_name", "filedata is None: # # filename = self.json_state_file # filename = self.absolute_json_state_file #", "# # work_address = workstation1_preped['workerAddress'] # # # # # This is to", "folder_utils.find_folder_in_path(self.uber_dock_folder, self.rdock_folder_name) # print('TADA ', self.directories) test = 1 # This will hold", "self.absolute_json_state_file, 'directory': self.directories, 'folderCreated': self.folder_exists, 'simStates': {}}) self.state_data.update(programs_dict) # self.prepVinaSim_exhaust() self.save_state_data_json() self.load_state_called =", "following # disclaimer in the documentation and/or other materials provided # with the", "# Number of TOP individuals to print in console # Integer in interval", "= abs_folder + os.sep + results_dask['contribMM_filename'] out_mem = results_dask['contribMM_mem'] out_file = open(out_name, 'w')", "= 0 else: curr_index -= 1 
class GMMPBSAObject(object):
    """
    Usage example


        >>> EPI_folder = '/media/Work/MEGA/Programming/StressHormones/dock_EPI'
        >>> EPI_samples = '/media/Work/MEGA/Programming/StressHormones/'
        >>>
        >>> receptor_file = EPI_folder + os.sep + 'centroid.pdb'
        >>> ligand_file = EPI_folder + os.sep + 'EPI.pdb'
        >>> molname = 'EPI'
        >>> receptor_name = 'LasR'
        >>>
        >>> LasR_MOR_mmpbsa_calc = g_mmpbsa_dask.GMMPBSAObject(traj, topol_file, tpr_file, mdp_file, index_file,
        >>>                                                    first_index, second_index,
        >>>                                                    molname, receptor_name)
        >>>
        >>> LasR_MOR_mmpbsa_calc.prepare_g_mmpbsa_dask_protocol(client)
        >>> LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True)
        >>> LasR_MOR_mmpbsa_calc.run_dask_docking(client)

    The docking counterpart of this workflow:

        >>> receptor_file = EPI_folder + os.sep + 'centroid_model_clust2.pdbqt'
        >>> ligand_file = EPI_folder + os.sep + 'EPI.pdbqt'
        >>>
        >>> EPI_uber_dock = uber_docker.UberDockerObject(receptor_file, ligand_file, '.',
        >>>                                              molname=molname, receptor_name=receptor_name)
        >>>
        >>> EPI_uber_dock.prepare_uber_dock_protocol()
        >>> EPI_uber_dock.run_uber_dock_protocol()

    Use ``prepare_uber_dock_protocol()`` for preparation and
    ``run_uber_dock_protocol()`` to run, or call the steps separately:

        >>> EPI_uber_dock.calculate_max_radius_from_com()
        >>> EPI_uber_dock.calculate_cube_edges()
        >>> EPI_uber_dock.calculate_box_edges_from_com()
        >>>
        >>> EPI_uber_dock.prepare_uber_docker()
        >>>
        >>> # This is for rDock, and it works so comment this part
        >>> EPI_uber_dock.prepare_rdock_settings()
        >>> EPI_uber_dock.generate_rdock_cavity()
        >>> # Prepare and run Dock programs
        >>> EPI_uber_dock.prep_rDock_dock_run_commands()
        >>> EPI_uber_dock.run_rDock_simulation(parallel=True, waitTime=15)
        >>>
        >>> # This is for FlexAid
        >>> EPI_uber_dock.process_flexaid_ligand()
        >>> EPI_uber_dock.get_flexaid_clefts()
        >>> EPI_uber_dock.flexaid_generate_ga_dat_parameters()
        >>> EPI_uber_dock.flexaid_generate_config_input()
        >>> EPI_uber_dock.prep_FlexAid_dock_run_commands()
        >>> EPI_uber_dock.run_FlexAid_simulation(parallel=True, waitTime=15)
        >>>
        >>> # This is for Autodock vina
        >>> EPI_uber_dock.set_up_Vina_Box()
        >>> EPI_uber_dock.prepare_Vina_run()
        >>> EPI_uber_dock.prepVinaSim_uberDock()
        >>> EPI_uber_dock.runVinaSim_uber()

    Molecule object loading of pdb and pdbqt file formats.
    Then converts to a pandas dataframe.

    Create MoleculeObject by parsing a pdb or pdbqt file.
    2 types of parsers can be used: 1. molmolpy  2. pybel.
    Stores molecule information in a pandas dataframe as well as a numpy list.

    Read more in the :ref:`User Guide <MoleculeObject>`.

    Parameters
    ----------
    filename : str, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.

    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.

    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n). Sparse neighborhoods can
    be precomputed using :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
    ``mode='distance'``.

    References
    ----------
    """

    def __init__(self, traj, topol,
                 tpr_file, mdp_file, index_file,
                 first_index, second_index,
                 molname='Unknown',
                 receptor_name='Unknown',
                 folder_path='.',
                 job_name='Unknown',
                 load_state_file=None):

        self.load_state_file = load_state_file

        if load_state_file is not None:
            self.load_state_data_json(self.load_state_file)
        else:
            print('G_MMPBSA Object has been created')

            self.trajectory_file = traj
            self.topology_file = topol

            self.tpr_file = tpr_file
            self.mdp_file = mdp_file
            self.index_file = index_file

            self.first_index = first_index
            self.second_index = second_index

            self.prep_g_mmpbsa_run = False
            self.folder_exists = False

            # Running vina, whether it's for exhaustiveness or a traditional run
            self.folder_path = folder_path
            self.command_run_list = []
            self.command_samples_run_list = []

            self.molecule_name = molname
            self.ligand_name = molname
            self.receptor_name = receptor_name

            self.job_name = job_name
            self.run_type = 'g_mmpbsa'

            self.state_data = {}
            self.state_data_samples = {}

            self.g_mmpbsa_run_finished = False
            self.g_mmpbsa_sim_states = {'simStates': {}}

            self.objects_loaded = False
            self.g_mmpbsa_prepared = False

            # This part needs clarification
            self.prep_mdtraj_object()
    def load_state_data_json(self, filename):
        '''
        :param filename: load json state data
        :return:
        '''
        # self.absolute_path = os.path.abspath(filename)
        self.load_state_called = True

        print(os.path.abspath(__file__))
        self.state_data = json.load(open(filename, "r"))
        # os.chdir('HSL_exhaustiveness')

        self.trajectory_file = self.state_data['trajectoryFile']
        self.mdp_file = self.state_data['mdpFile']
        self.tpr_file = self.state_data['tprFile']
        self.index_file = self.state_data['indexFile']

        self.folder_path = self.state_data['folderPath']
        self.run_type = self.state_data['runType']
        self.molecule_name = self.state_data['molName']
        self.receptor_name = self.state_data['receptorName']

        # TODO test
        self.sim_folder_run = self.state_data['simRunFolder']  # .split('/')[-1]
        self.directories = self.state_data['directory']
        self.folder_exists = self.state_data['folderCreated']

        self.absolute_json_state_file = self.state_data['absoluteJsonStates']
        self.g_mmpbsa_folder = self.state_data['RunFolder']
        self.json_state_file = self.state_data['jsonStates']

        self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare']

    def load_samples_state_data_json(self, filename):
        '''
        :param filename: load json state data
        :return:
        '''
        # self.absolute_path = os.path.abspath(filename)
        self.load_state_called_samples = True

        print(os.path.abspath(__file__))
        self.state_data_samples = json.load(open(filename, "r"))

        self.receptor_file = self.state_data_samples['receptorFile']
        self.ligand_file = self.state_data_samples['ligandFile']
        self.exhaustiveness = self.state_data_samples['exhaustivenessList']
        self.samples_run = self.state_data_samples['samplesList']
        self.folder_path = self.state_data_samples['folderPath']
        self.run_type = self.state_data_samples['runType']
        self.molecule_name = self.state_data_samples['molName']
        self.receptor_name = self.state_data_samples['receptorName']

        # TODO test
        self.samples_exhaust = self.state_data_samples['samples_exhaust']
        self.sim_folder_run_samples = self.state_data_samples['simRunFolder']  # .split('/')[-1]
        self.directories_samples = self.state_data_samples['directory']
        self.setup_box = self.state_data_samples['setup']
        self.folder_exists = self.state_data_samples['folderCreated']

        self.x_center = self.state_data_samples['boxSettings']['center_x']
        self.y_center = self.state_data_samples['boxSettings']['center_y']
        self.z_center = self.state_data_samples['boxSettings']['center_z']

    def save_state_data_json(self, filedata=None, filename=None):
        '''
        :param filename: Saves state file
        :return:
        '''
        if filename is None and filedata is None:
            # filename = self.json_state_file
            filename = self.absolute_json_state_file
            filedata = self.state_data
        else:
            filedata = filedata
            filename = filename
        json.dump(filedata, open(filename, "w"), sort_keys=True, indent=4)

    def set_flexaid_path(self, path):
        print('FlexAid path is set to ', path)
        self.flexaid_path = path

    def set_ledock_path(self, path):
        print('LeDock path is set to ', path)
        self.ledock_path = path

    def get_molecule_name(self):
        return self.molecule_name

    def get_receptor_name(self):
        return self.receptor_name

    def set_molecule_name(self, mol_name):
        self.molecule_name = mol_name

    def set_receptor_name(self, receptor_name):
        self.receptor_name = receptor_name

    # This might need to get modified
    def find_sample_files(self, folder):
        try:
            VIP = []
            for dirname, dirnames, filenames in os.walk(folder):
                for i in filenames:
                    if 'out' in i:
                        VIP.append(i)
                    # This is not necessary since info is inside pdbqt file
                    # elif 'vina_sample_' in i:
                    #     VIP.append(i)
            return VIP
        except Exception as e:
            print("error in find_files: ", e)
            sys.exit(0)

    def find_sample_folders(self, folder_path='.', dir_name='vina_sample'):
        try:
            dir_names = []
            for dirname, dirnames, filenames in os.walk(folder_path):
                # print(dirname, '-')
                if dir_name in dirname:
                    dir_names.append(dirname)
            # print sorted(dir_names)
            return sorted(dir_names)
        except Exception as e:
            print("Problem with finding folders : ", e)
            sys.exit(0)

    def hold_nSec(self, n):
        for i in range(1, n + 1):
            print(i)
            time.sleep(1)  # Delay for 1 sec
        print('Ok %s secs have passed' % n)
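    # --------------------------------------------------------------------------
    # Minimal sketch (not part of the original molmolpy API) of the JSON
    # round-trip that save_state_data_json()/load_state_data_json() above rely
    # on: the whole run state is one plain dict, dumped with sort_keys/indent
    # and reloaded verbatim. The file name is a hypothetical placeholder.
    # --------------------------------------------------------------------------
    @staticmethod
    def _state_json_roundtrip_sketch(filename='state_demo.json'):
        state = {'runType': 'g_mmpbsa',
                 'folderCreated': False,
                 'simStates': {'0': {'runFinished': False}}}

        # Persist, then reload and verify nothing was lost in transit
        with open(filename, 'w') as handle:
            json.dump(state, handle, sort_keys=True, indent=4)
        with open(filename, 'r') as handle:
            reloaded = json.load(handle)
        assert reloaded == state
        return reloaded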
    def prep_mdtraj_object(self):
        '''
        Prepare receptor mdtraj object

        Load the mdtraj topology and save it as a pandas dataframe.
        Calculate the pdb receptor center of mass.
        :return:
        '''
        self.trajectory_mdtraj = md.load_xtc(self.trajectory_file, top=self.topology_file)
        self.trajectory_mdtraj_topology = self.trajectory_mdtraj.topology
        self.trajectory_mdtraj_topology_dataframe = self.trajectory_mdtraj.topology.to_dataframe()

        self.objects_loaded = True

    def get_exhaust_run_folder_name(self):
        curr_folder = os.getcwd()
        return curr_folder + os.sep + self.run_folder_name

    def get_samples_run_folder_name(self):
        curr_folder = os.getcwd()
        print("Yippie yi kay", curr_folder)
        return curr_folder + os.sep + self.run_folder_name_samples

    def get_uber_g_mmpbsa_run_folder_name(self):
        curr_folder = os.getcwd()
        return curr_folder + os.sep + self.run_folder_name

    def prepare_g_mmpbsa(self):
        '''
        Prepare g_mmpbsa run folder and initial json configuration
        :return:
        '''
        self.run_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type
        self.sim_folder_run = self.folder_path + os.sep + self.run_folder_name

        # Create folder don't forget
        # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
        self.directories = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name)
        print('TADA ', self.directories)

        self.json_state_file = self.sim_folder_run + os.sep + self.receptor_name + '_' + \
            self.molecule_name + '_' + self.run_type + '.json'

        # This will hold information about run states
        self.g_mmpbsa_folder = self.get_uber_g_mmpbsa_run_folder_name()
        self.absolute_json_state_file = self.g_mmpbsa_folder + os.sep + self.receptor_name + '_' + \
            self.molecule_name + '_' + self.run_type + '.json'

        if len(self.directories) == 0:
            print('Creating folder for g_mmpbsa run\n')
            print(self.sim_folder_run)
            folder_utils.create_folder(self.sim_folder_run)
            self.folder_exists = True

            programs_dict = {'energySoftware': {'g_mmpbsa': {}}}

            self.state_data.update({'trajectoryFile': self.trajectory_file,
                                    'mdpFile': self.mdp_file,
                                    'tprFile': self.tpr_file,
                                    'indexFile': self.index_file,
                                    'runFolderName': self.run_folder_name,
                                    'folderPath': self.folder_path,
                                    'jsonStates': self.json_state_file,
                                    'runType': self.run_type,
                                    'molName': self.molecule_name,
                                    'receptorName': self.receptor_name,
                                    'simRunFolder': self.sim_folder_run,
                                    'RunFolder': self.g_mmpbsa_folder,
                                    'absoluteJsonStates': self.absolute_json_state_file,
                                    'directory': self.directories,
                                    'folderCreated': self.folder_exists,
                                    'simStates': {}})
            self.state_data.update(programs_dict)

            self.save_state_data_json()
            self.load_state_called = False
        else:
            self.load_state_file = self.json_state_file
            self.load_state_called = True
            self.load_state_data_json(self.load_state_file)
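    # --------------------------------------------------------------------------
    # Minimal mdtraj sketch (hypothetical file names) of what prep_mdtraj_object()
    # above does: load an xtc trajectory against a topology and expose the
    # topology as a pandas dataframe for downstream bookkeeping. Note that
    # Topology.to_dataframe() returns an (atoms, bonds) pair.
    # --------------------------------------------------------------------------
    @staticmethod
    def _mdtraj_load_sketch(traj_file='traj.xtc', top_file='topol.pdb'):
        trajectory = md.load_xtc(traj_file, top=top_file)
        atoms, bonds = trajectory.topology.to_dataframe()
        print('Frames: ', trajectory.n_frames, ' Atoms: ', trajectory.n_atoms)
        return atoms.head()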
    def prepare_g_mmpbsa_dask_protocol(self, dask_client=None, prep_g_mmpbsa=True):
        '''
        Prepare dask tasks for g_mmpbsa: split the trajectory into parts and
        register every part, together with its expected output files, in the
        json state.
        :return:
        '''
        self.prepare_g_mmpbsa()

        curr_client = dask_client

        # Testing Phase
        total_free_cores = 16

        # Production
        # worker_status = run_dask_tools.get_dask_worker_status(curr_client)
        # get_worker_free = run_dask_tools.check_free_resources(worker_status)
        # total_free_cores = 0
        # for worker in get_worker_free:
        #     preped = get_worker_free[worker]['preped']
        #     total_free_cores += preped['freeCores']

        if prep_g_mmpbsa is False:
            print('prep gmmpbsa ', prep_g_mmpbsa)
            return 'Do not prepare run files'

        if self.g_mmpbsa_prepared is True:
            print('Do not prep files')
            return 'Do not prep files'

        # Split the trajectory into as many parts as there are free cores
        traj_len = len(self.trajectory_mdtraj)
        div_traj = math.ceil(traj_len / total_free_cores)
        # div_traj = math.trunc(traj_len / total_free_cores)
        # select_indexes = list(range(total_free_cores))  # Maximum parallel

        select_frames = list(range(0, traj_len, div_traj))
        select_indexes = list(range(len(select_frames)))

        folder_to_save = self.g_mmpbsa_folder

        temp_mdtraj = []
        temp_mdtraj_indexes = []
        file_save_list = []
        abs_file_save_list = []

        simStates = {'simStates': {}}

        for i, traj in zip(select_indexes, select_frames):
            temp_state = {str(i): {}}

            temp_traj = self.trajectory_mdtraj[traj:traj + div_traj]
            temp_mdtraj.append(temp_traj)
            temp_mdtraj_indexes.append(i)

            file_save = 'traj_part{0}.xtc'.format(i)
            abs_file_save = folder_to_save + os.sep + file_save
            file_save_list.append(file_save)
            abs_file_save_list.append(abs_file_save)

            temp_state[str(i)].update({'runFinished': False,
                                       'index': i,
                                       'absFolder': folder_to_save,
                                       'fileSave': file_save,
                                       'absFileSave': abs_file_save,
                                       'firstIndex': self.first_index,
                                       'secondIndex': self.second_index,
                                       'indexFile': self.index_file,
                                       'mdpFile': self.mdp_file,
                                       'tprFile': self.tpr_file})

            energy_mm = 'energy_MM_{0}.xvg'.format(i)
            polar = 'polar_{0}.xvg'.format(i)
            apolar = 'apolar_{0}.xvg'.format(i)
            contrib_mm = 'contrib_MM_{0}.dat'.format(i)
            contrib_pol = 'contrib_pol_{0}.dat'.format(i)
            contrib_apol = 'contrib_apol_{0}.dat'.format(i)

            temp_state[str(i)].update({'energyMM': energy_mm,
                                       'polar': polar,
                                       'apolar': apolar,
                                       'contrib_MM': contrib_mm,
                                       'contrib_pol': contrib_pol,
                                       'contrib_apol': contrib_apol})

            temp_traj.save(abs_file_save)
            temp_state[str(i)].update({'fileSaved': True})

            simStates['simStates'].update(temp_state)

        self.mdtraj_frames = select_frames
        self.mdtraj_sliced = temp_mdtraj
        self.mdtraj_parts = temp_mdtraj_indexes
        self.file_save_list = file_save_list
        self.abs_file_save_list = abs_file_save_list
        self.simStates = simStates

        self.g_mmpbsa_prepared = True

        self.state_data['energySoftware']['g_mmpbsa'].update({'frames': self.mdtraj_frames})
        self.state_data['energySoftware']['g_mmpbsa'].update({'prepare': self.g_mmpbsa_prepared})
        self.state_data['energySoftware']['g_mmpbsa'].update({'parts': self.mdtraj_parts})
        self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list})
        self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list})
        self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates)

        self.state_data['energySoftware']['g_mmpbsa'].update({'firstIndex': self.first_index})
        self.state_data['energySoftware']['g_mmpbsa'].update({'secondIndex': self.second_index})
        self.state_data['energySoftware']['g_mmpbsa'].update({'indexFile': self.index_file})
        self.state_data['energySoftware']['g_mmpbsa'].update({'mdpFile': self.mdp_file})
        self.state_data['energySoftware']['g_mmpbsa'].update({'tprFile': self.tpr_file})

        self.save_state_data_json()

    def prepare_for_dask_cluster(self, parallel=True):
        '''
        Read the tpr, mdp, index and trajectory parts into memory so they can
        be shipped to dask workers, and build the g_mmpbsa run queue.
        :return:
        '''
        import copy
        # original data before transformation
        self.before_dask = copy.deepcopy(self.state_data)

        full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa']

        tpr_abs = full_g_mmpbsa_data['tprFile']
        tpr_file = open(tpr_abs, 'rb')
        tpr_mem = tpr_file.read()
        tpr_file.close()
        tpr_filename = tpr_abs.split(os.sep)[-1]

        mdp_abs = full_g_mmpbsa_data['mdpFile']
        mdp_file = open(mdp_abs, 'r')
        mdp_mem = mdp_file.read()
        mdp_file.close()
        mdp_filename = mdp_abs.split(os.sep)[-1]

        index_abs = full_g_mmpbsa_data['indexFile']
        index_file = open(index_abs, 'r')
        index_mem = index_file.read()
        index_file.close()
        index_filename = index_abs.split(os.sep)[-1]

        # data_pre = self.state_data['energySoftware']['g_mmpbsa']
        # data_pre.update({'dask': {}})
        data_pre = {}
        data_pre.update({'tprName': tpr_filename, 'tprMem': tpr_mem})
        data_pre.update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
        data_pre.update({'indexName': index_filename, 'indexMem': index_mem})
        self.dask_prep = data_pre

        run_g_mmpbsa = []

        for part_num in full_g_mmpbsa_data['parts']:
            data = self.state_data['energySoftware']['g_mmpbsa']['simStates'][str(part_num)]

            save_run_name = "g_mmpbsa_part_{0}".format(part_num)
            data.update({'Program': 'g_mmpbsa'})
            data.update({'part_num': part_num})
            data.update({'save_run_name': save_run_name})
            data.update({'dask': {}})

            traj_abs = data['absFileSave']
            traj_file = open(traj_abs, 'rb')
            traj_mem = traj_file.read()
            traj_file.close()
            traj_filename = data['fileSave']

            data['dask'].update({'trajMem': traj_mem, 'trajName': traj_filename})
            data['dask'].update({'tprName': tpr_filename, 'tprMem': tpr_mem})
            data['dask'].update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
            data['dask'].update({'indexName': index_filename, 'indexMem': index_mem})

            run_g_mmpbsa.append(data)

        # maybe 2 async threads, one checks finished simulations, other submits jobs
        run_mmpbsa_queue = run_g_mmpbsa

        # Need to select those that are not finished
        final_queue_job = []
        for pre_job in run_mmpbsa_queue:
            # print(pre_job)
            if pre_job['runFinished'] is False:
                final_queue_job.append(pre_job)

        self.run_mmpbsa_dask = final_queue_job
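    # --------------------------------------------------------------------------
    # Hedged sketch of what a worker presumably executes for one trajectory
    # part. The g_mmpbsa flags (-f/-s/-n/-i and the -mm/-pol/-apol/-mmcon/
    # -pcon/-apcon output options) follow the public g_mmpbsa documentation and
    # match the per-part output names registered above; the exact invocation
    # lives in molmolpy's run_dask_tools, which is not shown in this module, so
    # treat this as an assumption rather than the canonical call.
    # --------------------------------------------------------------------------
    @staticmethod
    def _g_mmpbsa_command_sketch(part_num, first_index, second_index):
        import subprocess

        command = ['g_mmpbsa',
                   '-f', 'traj_part{0}.xtc'.format(part_num),
                   '-s', 'topol.tpr',
                   '-n', 'index.ndx',
                   '-i', 'mmpbsa.mdp',
                   '-mm', 'energy_MM_{0}.xvg'.format(part_num),
                   '-pol', 'polar_{0}.xvg'.format(part_num),
                   '-apol', 'apolar_{0}.xvg'.format(part_num),
                   '-mmcon', 'contrib_MM_{0}.dat'.format(part_num),
                   '-pcon', 'contrib_pol_{0}.dat'.format(part_num),
                   '-apcon', 'contrib_apol_{0}.dat'.format(part_num),
                   '-decomp']
        # g_mmpbsa asks for the receptor and ligand index groups on stdin
        group_selection = '{0} {1}\n'.format(first_index, second_index)
        return subprocess.run(command, input=group_selection,
                              universal_newlines=True, check=False)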
    # @hlp.timeit
    def run_dask_gmmpbsa(self, client=None, max_jobs_to_run=10):
        # from molmolpy.moldock import run_dask_tools
        from molmolpy.tools import run_dask_tools
        import copy

        curr_client = client

        worker_status = run_dask_tools.get_dask_worker_status(curr_client)
        get_worker_free = run_dask_tools.check_free_resources(worker_status)
        self.workers_before_run = copy.deepcopy(get_worker_free)

        worker_ids = {}
        for i, id in enumerate(get_worker_free):
            worker_ids.update({i: id})

        queue_jobs = self.run_mmpbsa_dask
        job_quantity = len(queue_jobs)

        finished_jobs = []
        finished_jobs_dict = {}
        submitted_jobs = []
        submitted_jobs_dask = []

        # maybe 2 async threads, one checks finished simulations, other submits jobs
        ###############################################################################
        gmmbpsa_min_mem = 1000
        retries_num = 2

        curr_index = 0
        curr_worker = 0

        while len(queue_jobs) > 0:
            if curr_index >= len(queue_jobs):
                curr_index = 0

            curr_worker_id = worker_ids[curr_worker]

            worker_status_temp = run_dask_tools.get_dask_worker_status(curr_client)
            get_worker_free_temp = run_dask_tools.check_free_resources(worker_status_temp)
            workstation_preped = get_worker_free_temp[curr_worker_id]['preped']
            workstation_dir = workstation_preped['workerDir']
            workstation_freemem = workstation_preped['freeMemory']

            curr_item = queue_jobs[curr_index]
            curr_item_prog = curr_item['Program']

            jobs_running = len(submitted_jobs_dask) - len(finished_jobs)
            max_jobus = max_jobs_to_run

            # g_mmpbsa part
            if curr_item_prog == 'g_mmpbsa':
                if workstation_freemem > gmmbpsa_min_mem and jobs_running < max_jobus:
                    print('Submit MMPBSA job to DASK')
                    pop_item = queue_jobs.pop(curr_index)

                    key_name = pop_item['save_run_name']
                    run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)
                    print('Cur run ', run_name)

                    if curr_index == 0:
                        curr_index = 0
                    else:
                        curr_index -= 1

                    pop_item.update({'workingDir': workstation_dir})
                    submitted_jobs.append(pop_item)

                    task = curr_client.submit(run_dask_tools.run_g_mmpbsa_using_dask,
                                              pop_item,
                                              workers=[curr_worker_id],
                                              key=run_name,
                                              retries=retries_num)
                    submitted_jobs_dask.append(task)
                else:
                    key_name = curr_item['save_run_name']
                    run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)
                    print('Passed running ', run_name)
                    curr_index += 1

            curr_worker += 1
            if curr_worker == len(worker_ids):
                curr_worker = 0
                print('-----------------------------------------------------------------')

            time.sleep(10)

            finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask,
                                                                     finished_jobs,
                                                                     finished_jobs_dict)

        # Everything has been submitted; wait for the remaining futures
        while len(finished_jobs) != job_quantity:
            finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask,
                                                                     finished_jobs,
                                                                     finished_jobs_dict)
            time.sleep(60)

        print('->' * 10)
        print('Everything is finished :))))))')
        print('---' * 10)
        print('\n')
    @hlp.timeit
    def check_dask_jobs(self, submitted_jobs_dask, finished_jobs, finished_jobs_dict):
        import copy
        # modified_submitted_jobs_dask = copy.deepcopy(submitted_jobs_dask)
        for i, job in enumerate(submitted_jobs_dask):
            status = job.status
            if status == 'finished':
                try:
                    if finished_jobs_dict[i] is True:
                        continue
                except Exception as error:
                    pass

                finished_jobs.append(job)
                finished_jobs_dict.update({i: True})

                try:
                    results = job.result()

                    key = list(results.keys())[0]  # need [0] key
                    prog = results[key]['Program']
                    sample_num = results[key]['part_num']

                    update_results = copy.deepcopy(results)
                    update_results[key].pop('dask', None)

                    # self.state_data['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]
                    self.before_dask['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]

                    results_dask = results[key]['dask']

                    abs_folder = self.g_mmpbsa_folder  # original_data['AbsFolder']

                    # Write every in-memory result file the worker shipped back
                    for name_key, mem_key in (('out_filename', 'out_mem'),
                                              ('polar_filename', 'polar_mem'),
                                              ('apolar_filename', 'apolar_mem'),
                                              ('contribMM_filename', 'contribMM_mem'),
                                              ('contrib_pol_filename', 'contrib_pol_mem'),
                                              ('contrib_apol_filename', 'contrib_apol_mem')):
                        out_name = abs_folder + os.sep + results_dask[name_key]
                        out_file = open(out_name, 'w')
                        out_file.write(results_dask[mem_key])
                        out_file.close()

                    # to cool down
                    # self.hold_nSec(5)
                    print('This success ---> ', i)
                except Exception as error:
                    print('error is ', error)
                    # print('i is ', i)

        print('Finished checking dask submissions ---\n')
        print('---' * 10)
        return finished_jobs, finished_jobs_dict
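    # --------------------------------------------------------------------------
    # Self-contained sketch (plain dask.distributed, no molmolpy helpers) of
    # the submit-then-poll pattern implemented by run_dask_gmmpbsa() and
    # check_dask_jobs() above: futures are submitted with retries, .status is
    # polled, and .result() is gathered once a future reports 'finished'.
    # The squaring task is a stand-in for a real g_mmpbsa part.
    # --------------------------------------------------------------------------
    @staticmethod
    def _dask_submit_poll_sketch():
        from dask.distributed import Client, LocalCluster

        cluster = LocalCluster(n_workers=2, threads_per_worker=1)
        client = Client(cluster)

        futures = [client.submit(lambda x: x * x, part, retries=2, pure=False)
                   for part in range(4)]

        finished = {}
        while len(finished) != len(futures):
            for i, future in enumerate(futures):
                if i not in finished and future.status == 'finished':
                    finished[i] = future.result()
            time.sleep(0.5)

        client.close()
        cluster.close()
        return finished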
    ############################################################################
    # Docking helpers (LeDock, AutoDock Vina, FlexAid)
    ############################################################################

    def prepare_ledock_settings(self):
        '''
        Prepare the LeDock folder and initial json configuration
        :return:
        '''
        self.ledock_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + 'LeDock'
        self.ledock_absolute_folder_name = self.uber_dock_folder + os.sep + self.ledock_folder_name

        # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
        self.ledock_directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.ledock_folder_name)
        print('TADA ', self.ledock_directories)

        # This will hold information about run states
        if len(self.ledock_directories) == 0:
            print('Creating rdock folder in uberDocker folder \n')
            print(self.ledock_directories)
            folder_utils.create_folder(self.ledock_absolute_folder_name)

            self.receptor_ledock_pdb = "{0}.pdb".format(self.receptor_name)
            self.ligand_ledock_mol2 = "{0}.mol2".format(self.ligand_name)

            self.absolute_receptor_ledock_pdb = self.ledock_absolute_folder_name + os.sep + self.receptor_ledock_pdb
            self.absolute_ligand_ledock_mol2 = self.ledock_absolute_folder_name + os.sep + self.ligand_ledock_mol2

            # Need to check whether lepro ran fine
            self.lepro_pdb_file = 'pro.pdb'
            print('Updated receptor with LePro\n')

            self.ledock_folder_exists = True

            self.state_data['dockSoftware']['LeDock'].update(
                {'lepro_abs_pdb': self.ledock_absolute_folder_name + os.sep + self.lepro_pdb_file,
                 'abs_receptor_pdb': self.absolute_receptor_ledock_pdb,
                 'abs_ligand_mol2': self.absolute_ligand_ledock_mol2,
                 'LeDockFolderStatus': self.ledock_folder_exists,
                 'LeDockAbsFolder': self.ledock_absolute_folder_name,
                 'LeDockFolderName': self.ledock_folder_name})

            self.ledock_title = self.receptor_name + '_' + self.ligand_name + '_LeDock Parameter file'
            self.ledock_rmsd = 0.5
            self.set_up_ledock_dock_blind_parameters(title=self.ledock_title,
                                                     receptor_file=self.lepro_pdb_file,
                                                     ledock_rmsd=self.ledock_rmsd,
                                                     x_center=self.x_center,
                                                     y_center=self.y_center,
                                                     z_center=self.z_center)
        else:
            print('state has beeen loaded \n')

    @hlp.timeit
    def prep_LeDock_dock_run_commands(self, num_samples=10):
        '''
        Prepare LeDock run commands and save them to json
        :param num_samples: test value 6
        :return:
        '''
        try:
            self.g_mmpbsa_sim_states = self.state_data['dockSoftware']['LeDock']['simStates']
            self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list']
            print('No need to generate LeDock commands')
        except:
            self.state_data['dockSoftware']['LeDock'].update({'LeDockSample_list': self.ledock_samples})
            self.state_data['dockSoftware']['LeDock'].update(self.LeDock_sim_states)
            for sample_num in self.ledock_samples:
                self.prep_LeDock_dock_command(sample_num)
            print('Now continue for LeDock:D')
            self.save_state_data_json()
            self.prep_LeDock_run = True

    def prep_LeDock_dock_command(self, sample_num, pose_gen=20):
        '''
        prepare each separate LeDock run command
        :param sample_num:
        :param pose_gen:
        :return:
        '''
        sample_data = self.ledock_input_info[str(sample_num)]
        parm_name = sample_data['ledock_parm_name']

        self.save_run_name = "ledock_{0}_sample_{1}".format(self.run_type, sample_num)

        # ./ledock_linux_x86 dock.in
        command_receptor = self.ledock_path + os.sep + 'ledock_linux_x86'
        command_to_run = "{0} {1}".format(command_receptor, parm_name)

        ligand_clear_dok = sample_data['ligand_clear_name'] + '.dok'
        # -spli MOR_flexaid.dok
        command_to_clean = "{0} -spli {1}".format(command_receptor, ligand_clear_dok)
        print(command_to_run)

        self.LeDock_command_run_list.append(command_to_run)
        print("Launching new Sim")

        temp_dict = {str(sample_num): {'save_run_name': self.save_run_name,
                                       'commandRun': command_to_run,
                                       'commandToClean': command_to_clean,
                                       'runFinished': False}}
        self.LeDock_sim_states.update(temp_dict)
        print("LeDock command generation finished")

    def prep_vina_sample_command(self, sample_num):
        '''
        prepare each separate Vina sample run command
        :return:
        '''
        if self.setup_box is not False:
            # TODO need to think about seed
            self.save_run_name = 'vina_' + self.run_type_samples + '_' + str(sample_num)
            command_to_run = "vina --receptor {0} " \
                             "--ligand {1} " \
                             "--center_x {2} " \
                             "--center_y {3} " \
                             "--center_z {4} " \
                             "--size_x {5} " \
                             "--size_y {6} " \
                             "--size_z {7} " \
                             "--exhaustiveness {8} " \
                             "--num_modes {9} " \
                             "--seed 10 " \
                             "--log {10}.txt " \
                             "--out {11}_out.pdbqt".format(self.receptor_file, self.ligand_file,
                                                           self.x_center, self.y_center, self.z_center,
                                                           self.x_size, self.y_size, self.z_size,
                                                           self.samples_exhaust, self.num_modes,
                                                           self.save_run_name, self.save_run_name)
            print(command_to_run)
            self.command_samples_run_list.append(command_to_run)
            # try:
            #     os.system(command_to_run)
            # except KeyboardInterrupt:
            #     # quit
            #     sys.exit()
            print("Vina sample run command prep finished")
        else:
            print('Please setup vina box settings')

    def flexaid_generate_ga_dat_parameters(self):
        '''
        Generate GA dat parameters for flexaid docking
        :return:
        '''
        self.flexaid_ga_dat_param_template = '''# Number of chromosomes (number of individuals in the population)
# Integer in interval [1-N]
NUMCHROM 500

# Number of generations
# Integer in interval [1-N]
NUMGENER 500

# Use adaptive genetic-algorithm
# Value of 0 or 1
ADAPTVGA 1

# Adaptive crossover and mutation probabilities
# Floats in interval [0.0,1.0]
ADAPTKCO 0.95 0.10 0.95 0.10

# Constant crossover probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
CROSRATE 0.90

# Constant mutation probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
MUTARATE 0.10

# Crossover operator
# Intragenic crossovers

# Fitness function
# Value in [LINEAR,PSHARE]
FITMODEL PSHARE

# Parameters of the shared fitness function
# Floats in interval [0.0,1000.0]
SHAREALF 4.0
SHAREPEK 5.0
SHARESCL 10.0

# Reproduction model
# Values in [BOOM,STEADY]
REPMODEL BOOM

# Fraction of population to create
# Only considered when REPMODEL is BOOM
BOOMFRAC 1.0

# Number of new individuals to generate at each generation
# Only considered when REPMODEL is STEADY
# Integer in interval [1,N-1] where N is NUMCHROM
STEADNUM 950

# Number of TOP individuals to print in console
# Integer in interval [1,N] where N is NUMCHROM
PRINTCHR 10
'''
        self.generate_ga_dat_pameters = True
        self.generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat'
        self.generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep + self.generate_ga_dat

        self.generate_ga_dat_object_file = open(self.generate_ga_dat_name_abs, 'w')
        self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template)
        self.generate_ga_dat_object_file.close()

        self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}})
        self.state_data['dockSoftware']['FlexAid']['GA_params'].update(
            {'generateGA_param': self.generate_ga_dat_pameters,
             'GA_DataName': self.generate_ga_dat,
             'GA_DATA_Abs': self.generate_ga_dat_name_abs,
             'GA_ParamFull': self.flexaid_ga_dat_param_template})
        self.state_data_samples = self.state_data.copy()

    def flexaid_generate_config_input_dask(self):
        '''
        Generate flexaid config input file
        FlexAid is very strict about spaces
        :return:
        '''
        flexaid_config_input_template = '''# Optimization method (genetic-algorithms)
METOPT GA

# Permeability allowed between atoms
# Float in interval [0.0,1.0] from fully permeable to no permeability
PERMEA 0.9

# Side-chain rotamer acceptance threshold
# Float in interval [0.0-1.0]
DEECLA 0.8

# Use instances of side-chain conformers rather than using the Penultimate Rotamer Library
#ROTOBS

# Defines the grid spacing of the binding-site
# Float in interval [0.1,1.0]
SPACER 0.375

# Exclude heterogroups (water molecules are excluded by default)
# To exclude these groups, uncomment the next line
#EXCHET

# The variation in degrees for the anchor dihedral of the ligand
# Float in interval [1.0-30.0]
VARDIH 5.0

# The variation in degrees for the anchor angle of the ligand
# Float in interval [1.0-30.0]
VARANG 5.0

# The variation in degrees for flexible dihedrals of the ligand
# Float in interval [1.0-30.0]
VARFLX 10.0

# Use Vcontacts in the calculations of surfaces in contacts
NORMAR

# Vcontacts indexing
VINDEX

# Vcontacts plane definition
# Value in [B,R,X] for Bissecting, Radical and Extended radical plane
# See McConkey et al. (2002) Bioinformatics. 18(10); 1365-1373
VCTPLA R

# Solvent term penalty
# When the value is 0.0 the solvent term is derived from the interaction matrix
# Float in interval [-200.0,200.0]
SLVPEN 0.0

# Number of results/docking poses to output
MAXRES 20

# Only output scored atoms in the final results
# Comment the next line if you wish to obtain the whole complex
SCOOUT
'''
        # Specify the processed target file to use
        pdbnam = 'PDBNAM ' + '{0}\n\n'.format(self.receptor_flexaid_mol2)

        # Specify the processed ligand file to use
        # BTN.inp has the unique RESNUMC identifier LIG9999A
        inplig = 'INPLIG ' + '{0}.inp\n\n'.format(self.ligand_flexaid_initials)

        # Specify to use one or multiple cleft(s) as binding-site
        rgnopt_locclf = 'RNGOPT LOCCLF ' + '{0}\n\n'.format(self.flexaid_cleft_file)

        # Specify the degrees of freedom (DOF) of the processed ligand
        # with residue number 9999 and chain A
        # Translational DOF of the ligand (-1)
        optimz1 = 'OPTIMZ 9999 {0} -1\n\n'.format(self.flexaid_res_chain)
        # Rotational DOF of the ligand (0)
        optimz2 = 'OPTIMZ 9999 {0} 0\n\n'.format(self.flexaid_res_chain)

        # Add one extra line for each flexible bond of the ligand
        # The allowable flexible bonds are listed as FLEDIH lines in Processed_files/BTN.inp
        # In our example, Biotin has 5 flexible bonds
        flexible_bonds_data = open(
            self.flexaid_absolute_processed_files_folder + os.sep + '{0}.inp'.format(self.ligand_flexaid_initials),
            'r')
        flexible_bonds_data_text = flexible_bonds_data.read()
        flexible_bonds_data.close()

        flexible_bonds_data_text_list = flexible_bonds_data_text.split('\n')

        flexible_index_list_phrases = []
        flexible_index_list = []
        for i in flexible_bonds_data_text_list:
            if 'FLEDIH' in i:
                flex_index = i.split()[-1]
                flexible_index_list.append(flex_index)
                temp_line = 'OPTIMZ {0} {1} {2}\n'.format(self.flexaid_res_number,
                                                          self.flexaid_res_chain,
                                                          flex_index)
                flexible_index_list_phrases.append(temp_line)

        final_str = ''
        final_str += pdbnam
        final_str += inplig
        final_str += rgnopt_locclf
        final_str += optimz1
        final_str += optimz2
        for y in flexible_index_list_phrases:
            final_str += y
        final_str += '\n'

        rmsdst = 'RMSDST ' + '{0}_ref.pdb\n\n'.format(self.ligand_flexaid_initials)
        final_str += rmsdst
        final_str += flexaid_config_input_template

        generate_config_input_file = 'CONFIG_' + self.receptor_name + '-' + self.ligand_name + '.inp'
        return generate_config_input_file, final_str
class GMMPBSAObject(object):
    """
    Usage example

    >>> EPI_folder = '/media/Work/MEGA/Programming/StressHormones/dock_EPI'
    >>> EPI_samples = '/media/Work/MEGA/Programming/StressHormones/'
    >>>
    >>> ligand_file = EPI_folder + os.sep + 'EPI.pdb'
    >>> molname = 'EPI'
    >>> receptor_name = 'LasR'
    >>>
    >>> LasR_MOR_mmpbsa_calc = g_mmpbsa_dask.GMMPBSAObject(traj, topol_file, tpr_file, mdp_file, index_file,
    ...                                                    first_index, second_index, molname, receptor_name)
    >>>
    >>> LasR_MOR_mmpbsa_calc.prepare_g_mmpbsa_dask_protocol(client)
    >>> LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True)
    >>> # LasR_MOR_mmpbsa_calc.run_dask_docking(client)

    Create MoleculeObject by parsing a pdb or pdbqt file.
    Two types of parsers can be used: (1) molmolpy and (2) pybel.
    Stores molecule information in a pandas dataframe as well as a numpy list.
    Read more in the :ref:`User Guide <MoleculeObject>`.

    Parameters
    ----------
    filename : str
        Filename of the molecule file to parse.
    eps : float
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.

    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.

    This implementation bulk-computes all neighborhood queries, which
    increases the memory complexity to O(n.d) where d is the average number
    of neighbors, while original DBSCAN had memory complexity O(n). Sparse
    neighborhoods can be precomputed using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
    with ``mode='distance'``.

    References
    ----------
    """

    def __init__(self, traj, topol, tpr_file, mdp_file, index_file,
                 first_index, second_index, molname, receptor_name,
                 folder_path='.', job_name='Unknown', load_state_file=None):
        self.load_state_file = load_state_file

        if load_state_file is not None:
            self.load_state_called = True
            self.load_state_data_json(load_state_file)
        else:
            self.load_state_called = False

            self.trajectory_file = traj
            self.topology_file = topol
            self.tpr_file = tpr_file
            self.mdp_file = mdp_file
            self.index_file = index_file
            self.first_index = first_index
            self.second_index = second_index

            self.molecule_name = molname
            self.receptor_name = receptor_name
            self.folder_path = folder_path
            self.run_type = 'g_mmpbsa'

            self.state_data = {}
            self.state_data_samples = {}

            self.g_mmpbsa_run_finished = False
            self.g_mmpbsa_sim_states = {'simStates': {}}

            self.objects_loaded = False
            self.g_mmpbsa_prepared = False
            self.prep_g_mmpbsa_run = False
            self.folder_exists = False

            # This part needs clarification
            self.prep_mdtraj_object()
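    # --- Illustrative sketch, not part of the original molmolpy code ---
    # __init__ accepts load_state_file to resume from a previously saved JSON
    # state; a hypothetical convenience constructor for that path could look
    # like this (the method name is an assumption).
    @classmethod
    def _from_state_file_sketch(cls, state_file):
        # The trajectory/topology arguments are ignored when a state file is
        # given, because load_state_data_json restores them from the JSON.
        return cls(None, None, None, None, None, None, None, None, None,
                   load_state_file=state_file)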
    def get_molecule_name(self):
        return self.molecule_name

    def get_receptor_name(self):
        return self.receptor_name

    def set_molecule_name(self, mol_name):
        self.molecule_name = mol_name

    def set_mgltools_path(self, path):
        print('MGLTools path is set to ', path)
        self.mgltools_utilities = path

    def set_flexaid_path(self, path):
        print('FlexAid path is set to ', path)
        self.flexaid_path = path

    def set_ledock_path(self, path):
        print('LeDock path is set to ', path)
        self.ledock_path = path

    def prep_mdtraj_object(self):
        '''
        Prepare the receptor mdtraj object: load the trajectory, extract the
        mdtraj topology and save it as a pandas dataframe, and make the data
        available for calculating the pdb receptor center of mass.

        :return:
        '''
        self.trajectory_mdtraj = md.load_xtc(self.trajectory_file, top=self.topology_file)

        self.trajectory_mdtraj_topology = self.trajectory_mdtraj.topology
        # Note: Topology.to_dataframe() returns an (atoms, bonds) tuple.
        self.trajectory_mdtraj_topology_dataframe = self.trajectory_mdtraj.topology.to_dataframe()

        self.objects_loaded = True

    def get_uber_g_mmpbsa_run_folder_name(self):
        curr_folder = os.getcwd()
        return curr_folder + os.sep + self.run_folder_name
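    # --- Illustrative sketch, not part of the original molmolpy code ---
    # prep_mdtraj_object's docstring mentions calculating the receptor's
    # center of mass; a minimal way to do that with mdtraj is shown below.
    # The method name and the choice of frame are assumptions.
    def _center_of_mass_sketch(self):
        # compute_center_of_mass returns an (n_frames, 3) array in nanometers
        com_per_frame = md.compute_center_of_mass(self.trajectory_mdtraj)
        return com_per_frame[0]  # center of mass of the first frame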
    def save_state_data_json(self, filedata=None, filename=None):
        '''
        :param filename: Saves state file
        :return:
        '''
        if filename is None:
            filename = self.absolute_json_state_file
        if filedata is None:
            filedata = self.state_data

        with open(filename, 'w') as outfile:
            json.dump(filedata, outfile, sort_keys=True, indent=4)

    # TODO should I add json saving of information or not?
    def load_state_data_json(self, filename):
        '''
        :param filename: load json state data
        :return:
        '''
        self.load_state_called = True

        with open(filename, 'r') as infile:
            self.state_data = json.load(infile)

        self.trajectory_file = self.state_data['trajectoryFile']
        self.mdp_file = self.state_data['mdpFile']
        self.tpr_file = self.state_data['tprFile']
        self.index_file = self.state_data['indexFile']

        self.folder_path = self.state_data['folderPath']
        self.run_type = self.state_data['runType']
        self.molecule_name = self.state_data['molName']
        self.receptor_name = self.state_data['receptorName']

        # TODO test
        self.sim_folder_run = self.state_data['simRunFolder']
        self.directories = self.state_data['directory']
        self.folder_exists = self.state_data['folderCreated']

        self.g_mmpbsa_folder = self.state_data['RunFolder']
        self.json_state_file = self.state_data['jsonStates']

        # Restore the per-part g_mmpbsa bookkeeping, if it was saved.
        try:
            self.mdtraj_frames = self.state_data['energySoftware']['g_mmpbsa']['frames']
            self.mdtraj_parts = self.state_data['energySoftware']['g_mmpbsa']['parts']
            self.file_save_list = self.state_data['energySoftware']['g_mmpbsa']['fileList']
            self.abs_file_save_list = self.state_data['energySoftware']['g_mmpbsa']['absFileList']
            self.simStates = self.state_data['energySoftware']['g_mmpbsa']['simStates']
            self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare']
        except KeyError:
            print('G_mmpbsa state is empty, verify the saved state file')

        # Re-create the mdtraj objects from the restored file paths.
        self.prep_mdtraj_object()

    def prepare_g_mmpbsa(self):
        '''
        Prepare the g_mmpbsa run folder and the initial json configuration file.
        '''
        self.run_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type
        self.sim_folder_run = self.folder_path + os.sep + self.run_folder_name

        self.directories = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name)
        print('Found directories: ', self.directories)

        self.json_state_file = self.sim_folder_run + os.sep + self.receptor_name + '_' + \
                               self.molecule_name + '_' + self.run_type + '.json'

        # This will hold information about run states
        self.g_mmpbsa_folder = self.get_uber_g_mmpbsa_run_folder_name()
        self.absolute_json_state_file = self.g_mmpbsa_folder + os.sep + self.receptor_name + '_' + \
                                        self.molecule_name + '_' + self.run_type + '.json'

        if len(self.directories) == 0:
            print('Creating folder for g_mmpbsa run\n')
            print(self.sim_folder_run)
            folder_utils.create_folder(self.sim_folder_run)
            self.folder_exists = True

            programs_dict = {'energySoftware': {'g_mmpbsa': {}}}

            self.state_data.update({'trajectoryFile': self.trajectory_file,
                                    'mdpFile': self.mdp_file,
                                    'tprFile': self.tpr_file,
                                    'indexFile': self.index_file,
                                    'runFolderName': self.run_folder_name,
                                    'folderPath': self.folder_path,
                                    'jsonStates': self.json_state_file,
                                    'runType': self.run_type,
                                    'molName': self.molecule_name,
                                    'receptorName': self.receptor_name,
                                    'simRunFolder': self.sim_folder_run,
                                    'RunFolder': self.g_mmpbsa_folder,
                                    'absoluteJsonStates': self.absolute_json_state_file,
                                    'directory': self.directories,
                                    'folderCreated': self.folder_exists,
                                    'simStates': {}})
            self.state_data.update(programs_dict)

            self.save_state_data_json()
            self.load_state_called = False
        else:
            self.load_state_file = self.json_state_file
            self.load_state_called = True
            self.load_state_data_json(self.load_state_file)
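    # --- Illustrative sketch, not part of the original molmolpy code ---
    # Minimal demonstration of the JSON round trip performed by
    # save_state_data_json/load_state_data_json above; the file name used
    # here is hypothetical.
    def _state_json_roundtrip_sketch(self, filename='state_sketch.json'):
        snapshot = {'runType': self.run_type, 'molName': self.molecule_name}
        with open(filename, 'w') as handle:
            json.dump(snapshot, handle, sort_keys=True, indent=4)
        with open(filename, 'r') as handle:
            restored = json.load(handle)
        assert restored == snapshot
        return restored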
Sparse neighborhoods", "max_jobs_to_run=10): # from molmolpy.moldock import run_dask_tools from molmolpy.tools import run_dask_tools test = 1", "999999999 mgltools_utilities = '/home/john1990/MGLTools-1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24' class GMMPBSAObject(object): \"\"\" Usage example >>> EPI_folder = '/media/Work/MEGA/Programming/StressHormones/dock_EPI'", "if self.g_mmpbsa_prepared is True: print('Do not prep files') return 'Do not prep files'", "\"--seed 10 \" \\ \"--log {10}.txt \" \\ \"--out {11}_out.pdbqt\".format(self.receptor_file, self.ligand_file, self.x_center, self.y_center,", ">>> #This is for FlexAid >>> EPI_uber_dock.prepare_flexaid_settings() >>> EPI_uber_dock.process_flexaid_ligand() >>> EPI_uber_dock.get_flexaid_clefts() >>> EPI_uber_dock.flexaid_generate_ga_dat_parameters()", "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES", "save to json :param num_samples: test value 6 :return: ''' try: self.g_mmpbsa_sim_states =", "self.state_data['dockSoftware'][prog]['simStates'][str(sample_num)] = results[key] # if filename is None and filedata is None: #", "file_save = 'traj_part{0}.xtc'.format(i) abs_file_save = folder_to_save + os.sep + file_save file_save_list.append(file_save) abs_file_save_list.append(abs_file_save) temp_state[str(i)].update({'runFinished':False,", "get mdtraj topology and save as pandas dataframe Calculate pdb receptor center of", "+ self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json' #", "population to create # Only considered when REPMODEL is BOOM BOOMFRAC 1.0 #", "# Do not consider intramolecular interactions NOINTR # Side-chain rotamer acceptance threshold #", "water molecules, uncomment the next line #INCHOH # Permeability allowed between atoms #", "'vina_' + self.run_type_samples + '_' + str(sample_num) command_to_run = \"vina --receptor {0} \"", "Floats in interval [0.0,1000.0] SHAREALF 4.0 SHAREPEK 5.0 SHARESCL 10.0 # Reproduction model", "parsing pdb or pdbqt file. 2 types of parsers can be used: 1.molmolpy", "is STEADY # Integer in interval [1,N-1] where N is NUMCHROM STEADNUM 950", "self.setup_ledock_pameters = self.ledock_data['setup_LeDock'] # self.ledock_num_samples = self.ledock_data['num_samples'] # self.ledock_input_info = self.ledock_data['LeDockInputInfo'] # self.param_ledock_template", "the CF for ligand atoms despite including flexible side-chains #SCOLIG # Ends reading", "with ``mode='distance'``. 
References ---------- \"\"\" def __init__(self, traj, topol, tpr_file, mdp_file, index_file, first_index,", "= open(out_name, 'w') out_file.write(out_mem) out_file.close() out_name = abs_folder + os.sep + results_dask['contrib_pol_filename'] out_mem", "job in enumerate(submitted_jobs_dask): status = job.status if status == 'finished': test = 1", "folder is buggy workstation_dir = original_get_worker_free[curr_worker_id]['preped']['workerDir'] workstation_freemem = workstation_preped_temp['freeMemory'] workstation_freecpu = workstation_preped_temp['freeCores'] curr_item_prog", "client.scatter(pop_item, workers=[workstation_address], hash=False) big_future = pop_item task_g_mmpbsa = client.submit(run_dask_tools.run_gmmpbsa_using_dask, big_future, workers=[workstation_address], key=run_name, retries=retries_num)", "full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa'] test = 1 tpr_abs= full_g_mmpbsa_data['tprFile'] tpr_file = open(tpr_abs, 'rb') tpr_mem", "save_state_data_json(self, filedata=None, filename=None): ''' :param filename: Saves state file :return: ''' # import", "+ self.molecule_name + '_' + self.run_type + '.json' # This will hold information", "had memory complexity O(n). Sparse neighborhoods can be precomputed using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with", "1 ###################################################3 # update index # print(curr_item) # How to save submitted jobs", "self.folder_exists = True programs_dict = {'energySoftware': {'g_mmpbsa': {}}} self.state_data.update({'trajectoryFile': self.trajectory_file, 'mdpFile': self.mdp_file, 'tprFile':", "# result = run_dask_tools.run_gmmpbsa_using_dask(job_test) test = 1 # Local upload test # big_future", "print('Creating folder for g_mmpbsa run\\n') print(self.sim_folder_run) folder_utils.create_folder(self.sim_folder_run) self.folder_exists = True programs_dict = {'energySoftware':", "1 curr_worker_id = worker_ids[curr_worker] workstation_info_temp = get_worker_free_temp[curr_worker_id] workstation_preped_temp = workstation_info_temp['preped'] workstation_address = workstation_preped_temp['workerAddress']", "= {'energySoftware': {'g_mmpbsa': {}}} self.state_data.update({'trajectoryFile': self.trajectory_file, 'mdpFile': self.mdp_file, 'tprFile': self.tpr_file, 'indexFile': self.index_file, 'runFolderName':", "test = 1 # TODO enter ledock folder and process structure for docking", "clients client.upload_file # task = client.submit(run_dask_tools.upload_g_mmpbsa_files_dask, # big_future, # workers=[worker_address], # key='key_scatter_{0}'.format(worker_address), #", "of the ligand (0) optimz2 = 'OPTIMZ 9999 {0} 0\\n\\n'.format(self.flexaid_res_chain) # Add one", "update_results[key].pop('dask', None) # self.state_data['dockSoftware'][prog]['simStates'][str(sample_num )] = results[key] # self.state_data['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key] self.before_dask['energySoftware'][prog]['simStates'][str(sample_num)] =", "tpr_abs= full_g_mmpbsa_data['tprFile'] tpr_file = open(tpr_abs, 'rb') tpr_mem = tpr_file.read() tpr_filename = tpr_abs.split(os.sep)[-1] #", "provided that the following conditions are # met: # * Redistributions of source", "endorse or promote products derived # from this software without specific prior written", "self.run_mmpbsa_dask = final_queue_job # random.shuffle(self.run_docking_queue) print('Finished preparing g_mmpbsa jobs') # TODO should I", "rDock,FlexAid, Vina :return: ''' 
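
    # --- Illustrative sketch (not part of the original molmolpy API) ---
    # The json state dictionary read and written by the methods below has at
    # least the following shape. The literal values here are invented
    # placeholders; the keys are the ones used throughout this class.
    @staticmethod
    def _example_state_layout():
        return {
            'trajectoryFile': 'md.xtc',
            'mdpFile': 'mmpbsa.mdp',
            'tprFile': 'md.tpr',
            'indexFile': 'index.ndx',
            'runType': 'g_mmpbsa',
            'molName': 'EPI',
            'receptorName': 'LasR',
            'energySoftware': {
                'g_mmpbsa': {
                    'prepare': False,   # True once the trajectory was split
                    'parts': [],        # trajectory part indexes
                    'fileList': [],     # traj_part<i>.xtc names
                    'absFileList': [],  # absolute paths of the parts
                    'simStates': {},    # per-part run bookkeeping
                },
            },
        }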
    def prep_mdtraj_object(self):
        '''
        Prepare receptor mdtraj object.

        Get the mdtraj topology, save it as a pandas dataframe and calculate
        the pdb receptor center of mass.
        :return:
        '''
        self.trajectory_mdtraj = md.load_xtc(self.trajectory_file, top=self.topology_file)
        self.trajectory_mdtraj_topology = self.trajectory_mdtraj.topology
        self.trajectory_mdtraj_topology_dataframe = self.trajectory_mdtraj.topology.to_dataframe()
        self.objects_loaded = True

    def get_uber_g_mmpbsa_run_folder_name(self):
        curr_folder = os.getcwd()
        return curr_folder + os.sep + self.run_folder_name

    def get_uber_g_mmpbsa_samples_run_folder_name(self):
        curr_folder = os.getcwd()
        print("Yippie yi kay", curr_folder)
        return curr_folder + os.sep + self.run_folder_name_samples

    def prepare_g_mmpbsa(self):
        '''
        Prepare the g_mmpbsa run folder and the initial json configuration.
        :return:
        '''
        self.run_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type
        self.sim_folder_run = self.folder_path + os.sep + self.run_folder_name

        # Create the run folder if it does not exist yet.
        self.directories = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name)
        print('TADA ', self.directories)
        self.json_state_file = self.sim_folder_run + os.sep + self.receptor_name + '_' + \
            self.molecule_name + '_' + self.run_type + '.json'

        if len(self.directories) == 0:
            print('Creating folder for g_mmpbsa run\n')
            print(self.sim_folder_run)
            folder_utils.create_folder(self.sim_folder_run)
            self.folder_exists = True

        self.g_mmpbsa_folder = self.get_uber_g_mmpbsa_run_folder_name()
        self.absolute_json_state_file = self.g_mmpbsa_folder + os.sep + self.receptor_name + '_' + \
            self.molecule_name + '_' + self.run_type + '.json'

        # This holds information about run states.
        programs_dict = {'energySoftware': {'g_mmpbsa': {}}}
        self.state_data.update({'trajectoryFile': self.trajectory_file,
                                'mdpFile': self.mdp_file,
                                'tprFile': self.tpr_file,
                                'indexFile': self.index_file,
                                'runFolderName': self.run_folder_name,
                                'folderPath': self.folder_path,
                                'jsonStates': self.json_state_file,
                                'runType': self.run_type,
                                'molName': self.molecule_name,
                                'receptorName': self.receptor_name,
                                'simRunFolder': self.sim_folder_run,
                                'RunFolder': self.g_mmpbsa_folder,
                                'absoluteJsonStates': self.absolute_json_state_file,
                                'directory': self.directories,
                                'folderCreated': self.folder_exists,
                                'simStates': {}})
        self.state_data.update(programs_dict)
        self.save_state_data_json()

    # TODO should I add json saving of information or not?
    def save_state_data_json(self, filedata=None, filename=None):
        '''
        :param filename: Saves state file
        :return:
        '''
        if filename is None and filedata is None:
            filename = self.absolute_json_state_file
            filedata = self.state_data
        json.dump(filedata, open(filename, "w"), sort_keys=True, indent=4)

    def load_state_data_json(self, filename):
        '''
        :param filename: load json state data
        :return:
        '''
        self.load_state_called = True
        print(os.path.abspath(__file__))
        self.state_data = json.load(open(filename, "r"))

        self.trajectory_file = self.state_data['trajectoryFile']
        self.mdp_file = self.state_data['mdpFile']
        self.tpr_file = self.state_data['tprFile']
        self.index_file = self.state_data['indexFile']
        self.run_type = self.state_data['runType']
        self.molecule_name = self.state_data['molName']
        self.receptor_name = self.state_data['receptorName']
        # TODO test
        self.sim_folder_run = self.state_data['simRunFolder']  # .split('/')[-1]
        self.directories = self.state_data['directory']
        self.folder_exists = self.state_data['folderCreated']
        self.absolute_json_state_file = self.state_data['absoluteJsonStates']
        self.g_mmpbsa_folder = self.state_data['RunFolder']
        self.json_state_file = self.state_data['jsonStates']

        # Restore data from an earlier prepared run, if it is present.
        try:
            self.mdtraj_frames = self.state_data['energySoftware']['g_mmpbsa']['frames']
            self.mdtraj_parts = self.state_data['energySoftware']['g_mmpbsa']['parts']
            self.file_save_list = self.state_data['energySoftware']['g_mmpbsa']['fileList']
            self.abs_file_save_list = self.state_data['energySoftware']['g_mmpbsa']['absFileList']
            self.simStates = self.state_data['energySoftware']['g_mmpbsa']['simStates']
            self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare']
        except Exception:
            print('G_mmpbsa is empty verify yolo')
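
    # --- Illustrative sketch (not part of the original molmolpy API) ---
    # save_state_data_json()/load_state_data_json() above persist the whole
    # run state through plain json. A minimal round trip under the same
    # conventions (sort_keys=True, indent=4) looks like this; the file name
    # is a placeholder.
    @staticmethod
    def _example_state_roundtrip(filename='demo_g_mmpbsa_state.json'):
        state = {'runType': 'g_mmpbsa',
                 'molName': 'EPI',
                 'receptorName': 'LasR',
                 'energySoftware': {'g_mmpbsa': {'simStates': {}}}}
        with open(filename, 'w') as handle:
            json.dump(state, handle, sort_keys=True, indent=4)
        with open(filename, 'r') as handle:
            loaded = json.load(handle)
        assert loaded == state  # the json round trip preserves this structure
        return loaded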
    def prepare_g_mmpbsa_dask_protocol(self, dask_client=None, prep_g_mmpbsa=True):
        '''
        Prepare dask tasks for g_mmpbsa.

        The loaded trajectory is divided into parts, one part per free worker
        core; each part is saved as its own .xtc file together with a
        simState entry that tracks its progress.
        :return:
        '''
        self.prepare_g_mmpbsa()

        curr_client = dask_client

        # Testing phase: assume a fixed pool of cores.
        total_free_cores = 16

        # Production:
        # worker_status = run_dask_tools.get_dask_worker_status(curr_client)
        # get_worker_free = run_dask_tools.check_free_resources(worker_status)
        # total_free_cores = 0
        # for worker in get_worker_free:
        #     preped = get_worker_free[worker]['preped']
        #     total_free_cores += preped['freeCores']

        if prep_g_mmpbsa is False:
            print('prep gmmpbsa ', prep_g_mmpbsa)
            return 'Do not prepare run files'

        if self.g_mmpbsa_prepared is True:
            print('Do not prep files')
            return 'Do not prep files'

        # Divide the trajectory between the free cores.
        # TODO article: Pagadala, Software for molecular docking: a review
        traj_len = len(self.trajectory_mdtraj)
        div_traj = math.trunc(traj_len / total_free_cores)

        select_frames = list(range(0, traj_len, div_traj))
        select_indexes = list(range(len(select_frames)))

        folder_to_save = self.g_mmpbsa_folder
        temp_mdtraj = []
        temp_mdtraj_indexes = []
        file_save_list = []
        abs_file_save_list = []
        simStates = {'simStates': {}}
        temp_state = {}

        for i, traj in zip(select_indexes, select_frames):
            temp_traj = self.trajectory_mdtraj[traj:traj + div_traj]
            temp_mdtraj.append(temp_traj)
            temp_mdtraj_indexes.append(i)

            file_save = 'traj_part{0}.xtc'.format(i)
            abs_file_save = folder_to_save + os.sep + file_save
            file_save_list.append(file_save)
            abs_file_save_list.append(abs_file_save)
            temp_traj.save(abs_file_save)

            energy_mm = 'energy_MM_{0}.xvg'.format(i)
            polar = 'polar_{0}.xvg'.format(i)
            apolar = 'apolar_{0}.xvg'.format(i)

            temp_state.update({str(i): {'fileSave': file_save,
                                        'absFileSave': abs_file_save,
                                        'indexFile': self.index_file,
                                        'mdpFile': self.mdp_file,
                                        'tprFile': self.tpr_file,
                                        'energyMM': energy_mm,
                                        'polar': polar,
                                        'apolar': apolar,
                                        'runFinished': False,
                                        'fileSaved': True}})

        simStates['simStates'].update(temp_state)

        self.mdtraj_frames = select_frames
        self.mdtraj_sliced = temp_mdtraj
        self.mdtraj_parts = temp_mdtraj_indexes
        self.file_save_list = file_save_list
        self.abs_file_save_list = abs_file_save_list
        self.simStates = simStates

        self.g_mmpbsa_prepared = True

        self.state_data['energySoftware']['g_mmpbsa'].update({'frames': self.mdtraj_frames})
        self.state_data['energySoftware']['g_mmpbsa'].update({'prepare': self.g_mmpbsa_prepared})
        self.state_data['energySoftware']['g_mmpbsa'].update({'parts': self.mdtraj_parts})
        self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list})
        self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list})
        self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates)
        self.state_data['energySoftware']['g_mmpbsa'].update({'firstIndex': self.first_index})
        self.state_data['energySoftware']['g_mmpbsa'].update({'secondIndex': self.second_index})
        self.state_data['energySoftware']['g_mmpbsa'].update({'indexFile': self.index_file})
        self.state_data['energySoftware']['g_mmpbsa'].update({'mdpFile': self.mdp_file})
        self.state_data['energySoftware']['g_mmpbsa'].update({'tprFile': self.tpr_file})
        self.save_state_data_json()
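
    # --- Illustrative sketch (not in the original API): the frame-chunking
    # arithmetic used above, shown on plain integers. With traj_len=1000 and
    # total_free_cores=16, math.trunc gives chunks of 62 frames; range()
    # produces one extra, shorter chunk when the division is not exact.
    @staticmethod
    def _example_frame_chunking(traj_len=1000, total_free_cores=16):
        # guard against very short trajectories (step 0 would be invalid)
        div_traj = max(1, math.trunc(traj_len / total_free_cores))
        starts = list(range(0, traj_len, div_traj))
        # each part covers frames [start, start + div_traj)
        parts = [(start, min(start + div_traj, traj_len)) for start in starts]
        assert sum(end - start for start, end in parts) == traj_len
        return parts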
    def prepare_for_dask_cluster(self, LeDock=2, rDock=2, FlexAid=2, Vina=2, parallel=False):
        '''
        Prepare the g_mmpbsa job queue for the dask cluster.

        Every queued job carries the tpr/mdp/index files and its own
        trajectory part in memory, so a worker does not need a shared
        filesystem.
        :return:
        '''
        current_pid = multiprocessing.current_process().pid
        print("Main Process with PID:{}".format(current_pid))

        run_g_mmpbsa = []

        # Keep a copy of the state from before the dask run.
        self.before_dask = copy.deepcopy(self.state_data)

        full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa']

        # Scattering is a lot better for big files, but here the shared
        # inputs are simply read into memory once.
        tpr_abs = full_g_mmpbsa_data['tprFile']
        with open(tpr_abs, 'rb') as tpr_file:
            tpr_mem = tpr_file.read()
        tpr_filename = tpr_abs.split(os.sep)[-1]

        mdp_abs = full_g_mmpbsa_data['mdpFile']
        with open(mdp_abs, 'r') as mdp_file:
            mdp_mem = mdp_file.read()
        mdp_filename = mdp_abs.split(os.sep)[-1]

        index_abs = full_g_mmpbsa_data['indexFile']
        with open(index_abs, 'r') as index_file:
            index_mem = index_file.read()
        index_filename = index_abs.split(os.sep)[-1]

        data_pre = {}
        data_pre.update({'tprName': tpr_filename, 'tprMem': tpr_mem})
        data_pre.update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
        data_pre.update({'indexName': index_filename, 'indexMem': index_mem})
        self.dask_prep = data_pre

        for part_num in full_g_mmpbsa_data['parts']:
            data = self.state_data['energySoftware']['g_mmpbsa']['simStates'][str(part_num)]
            save_run_name = "g_mmpbsa_part_{0}".format(part_num)
            data.update({'Program': 'g_mmpbsa'})
            data.update({'part_num': part_num})
            data.update({'save_run_name': save_run_name})
            data.update({'dask': {}})

            traj_abs = data['absFileSave']
            with open(traj_abs, 'rb') as traj_file:
                traj_mem = traj_file.read()
            traj_filename = data['fileSave']

            data['dask'].update({'trajMem': traj_mem, 'trajName': traj_filename})
            data['dask'].update({'tprName': tpr_filename, 'tprMem': tpr_mem})
            data['dask'].update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
            data['dask'].update({'indexName': index_filename, 'indexMem': index_mem})

            run_g_mmpbsa.append(data)

        # Queue only the parts that have not finished yet.
        # run_docking_queue = run_docking_LeDock + run_docking_FlexAid + run_docking_Vina
        run_mmpbsa_queue = run_g_mmpbsa
        final_queue_job = []
        for pre_job in run_mmpbsa_queue:
            # print(pre_job)
            if pre_job['runFinished'] is False:
                final_queue_job.append(pre_job)
        self.run_mmpbsa_dask = final_queue_job
        # random.shuffle(self.run_docking_queue)
        print('Finished preparing g_mmpbsa jobs')

    # @hlp.timeit
    def run_dask_gmmpbsa(self, client=None, max_jobs_to_run=10):
        '''
        Submit the queued g_mmpbsa jobs to the dask cluster and poll until
        every part has finished.
        :return:
        '''
        # from molmolpy.moldock import run_dask_tools
        from molmolpy.tools import run_dask_tools

        curr_client = client
        worker_status = run_dask_tools.get_dask_worker_status(curr_client)
        get_worker_free = run_dask_tools.check_free_resources(worker_status)
        original_get_worker_free = copy.deepcopy(get_worker_free)

        submitted_jobs = []
        submitted_jobs_dask = []
        queue_jobs = self.run_mmpbsa_dask
        job_quantity = len(queue_jobs)
        finished_jobs = []
        finished_jobs_dict = {}

        # maybe 2 async threads, one checks finished jobs, another submits?
        retries_num = 2
        curr_index = 0
        curr_worker = 0
        custom_index_curr = 0
        gmmpbsa_min_mem = 4000  # minimum free memory (MB) on a worker before placing a job

        # prepare worker ids
        worker_ids = list(get_worker_free.keys())

        try:
            while len(queue_jobs) > 0:
                print('-----------------------------------------------------------------')
                worker_status_temp = run_dask_tools.get_dask_worker_status(curr_client,
                                                                           custom_index=custom_index_curr)
                get_worker_free_temp = run_dask_tools.check_free_resources(worker_status_temp)
                custom_index_curr += 2

                curr_item = queue_jobs[curr_index]
                curr_worker_id = worker_ids[curr_worker]
                workstation_info_temp = get_worker_free_temp[curr_worker_id]
                workstation_preped_temp = workstation_info_temp['preped']
                workstation_address = workstation_preped_temp['workerAddress']
                # The freshly polled folder is buggy, take the original one.
                workstation_dir = original_get_worker_free[curr_worker_id]['preped']['workerDir']
                workstation_freemem = workstation_preped_temp['freeMemory']
                workstation_freecpu = workstation_preped_temp['freeCores']

                curr_item_prog = curr_item['Program']
                jobs_running = len(submitted_jobs_dask) - len(finished_jobs)

                if curr_item_prog == 'g_mmpbsa':
                    if workstation_freemem > gmmpbsa_min_mem and jobs_running < max_jobs_to_run:
                        print('Submit MMPBSA job to ', workstation_address)
                        pop_item = queue_jobs.pop(curr_index)
                        key_name = pop_item['save_run_name']
                        run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)
                        print('Cur run ', run_name)
                        if curr_index != 0:
                            curr_index -= 1
                        pop_item.update({'workingDir': workstation_dir})
                        submitted_jobs.append(pop_item)
                        # big_future = client.scatter(pop_item, workers=[workstation_address], hash=False)
                        big_future = pop_item
                        task_g_mmpbsa = client.submit(run_dask_tools.run_gmmpbsa_using_dask,
                                                      big_future,
                                                      workers=[workstation_address],
                                                      key=run_name,
                                                      retries=retries_num)
                        submitted_jobs_dask.append(task_g_mmpbsa)
                    else:
                        key_name = curr_item['save_run_name']
                        run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)
                        print('Passed running ', run_name)

                finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask,
                                                                         finished_jobs,
                                                                         finished_jobs_dict)

                # Advance to the next queued job and the next worker.
                if len(queue_jobs) > 0:
                    if curr_index >= len(queue_jobs) - 1:
                        curr_index = 0
                    else:
                        curr_index += 1
                curr_worker += 1
                if curr_worker == len(worker_ids):
                    curr_worker = 0
                time.sleep(10)

            # Everything is submitted; wait for the stragglers.
            while len(finished_jobs) != job_quantity:
                finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask,
                                                                         finished_jobs,
                                                                         finished_jobs_dict)
                time.sleep(10)
        except Exception as e:
            print("error in runSim: ", e)
            sys.exit(0)

    @hlp.timeit
    def check_dask_jobs(self, submitted_jobs_dask, finished_jobs, finished_jobs_dict):
        '''
        Collect finished dask futures and write their output files back into
        the run folder.
        :return:
        '''
        for i, job in enumerate(submitted_jobs_dask):
            status = job.status
            if status == 'finished':
                # Skip futures whose results were already collected.
                try:
                    if finished_jobs_dict[i] is True:
                        continue
                except Exception:
                    pass

                finished_jobs.append(job)
                finished_jobs_dict.update({i: True})
                results = job.result()

                try:
                    key = list(results.keys())[0]
                    prog = results[key]['Program']
                    if prog == 'g_mmpbsa':
                        sample_num = results[key]['part_num']

                    # Drop the in-memory payload before saving the state.
                    update_results = copy.deepcopy(results)
                    update_results[key].pop('dask', None)
                    self.state_data['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]
                    self.before_dask['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]

                    # Write the returned g_mmpbsa outputs into the run folder.
                    results_dask = results[key]['dask']
                    abs_folder = self.g_mmpbsa_folder
                    for name_key, mem_key in (('out_filename', 'out_mem'),
                                              ('energyMM_filename', 'energyMM_mem'),
                                              ('polar_filename', 'polar_mem'),
                                              ('apolar_filename', 'apolar_mem'),
                                              ('contribMM_filename', 'contribMM_mem'),
                                              ('contrib_pol_filename', 'contrib_pol_mem'),
                                              ('contrib_apol_filename', 'contrib_apol_mem')):
                        out_name = abs_folder + os.sep + results_dask[name_key]
                        with open(out_name, 'w') as out_file:
                            out_file.write(results_dask[mem_key])

                    self.save_state_data_json(filedata=self.before_dask,
                                              filename=self.absolute_json_state_file)
                    print('This success ---> ', i)
                except Exception as error:
                    print('error is ', error)
        print('---' * 10)
        print('\n')
        return finished_jobs, finished_jobs_dict
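
    # --- Illustrative sketch (not in the original API): the submit/poll
    # pattern used by run_dask_gmmpbsa/check_dask_jobs above, reduced to a
    # self-contained dask.distributed example with a local cluster and a
    # trivial payload function standing in for run_gmmpbsa_using_dask.
    @staticmethod
    def _example_dask_submit_poll():
        from distributed import Client, LocalCluster

        def payload(part):
            # placeholder for the real g_mmpbsa worker function
            return {'part_num': part, 'runFinished': True}

        cluster = LocalCluster(n_workers=2, threads_per_worker=1)
        client = Client(cluster)
        futures = [client.submit(payload, part, key='key_part_{0}'.format(part), retries=2)
                   for part in range(4)]
        finished = []
        while len(finished) != len(futures):
            # poll the future status flags, as check_dask_jobs does
            finished = [f for f in futures if f.status == 'finished']
            time.sleep(0.1)
        results = client.gather(futures)
        client.close()
        cluster.close()
        return results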
    def prepare_ledock_settings(self):
        '''
        Prepare the LeDock folder and its initial json configuration.

        Convert with pybel to pdb for the receptor and mol2 for the ligand.
        :return:
        '''
        # self.output_receptor_rdock = Outputfile("mol2", "{0}.mol2".format(self.receptor_name))
        # self.output_receptor_rdock.write(self.receptor_pybel)
        # self.output_receptor_rdock.close()
        #
        # self.output_ligand_rdock = Outputfile("sd", "{0}.sd".format(self.ligand_name))
        # self.output_ligand_rdock.write(self.ligand_pybel)
        # self.output_ligand_rdock.close()

        self.ledock_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + 'LeDock'
        self.ledock_absolute_folder_name = self.uber_dock_folder + os.sep + self.ledock_folder_name
        self.ledock_directories = folder_utils.find_folder_in_path(self.uber_dock_folder,
                                                                   self.ledock_folder_name)
        print('TADA ', self.ledock_directories)

        # This will hold information about run states.
        self.uber_dock_folder = self.get_uber_dock_run_folder_name()
        if len(self.ledock_directories) == 0:
            print('Creating LeDock folder in uberDocker folder \n')
            print(self.ledock_directories)
            folder_utils.create_folder(self.ledock_absolute_folder_name)

            self.receptor_ledock_pdb = "{0}.pdb".format(self.receptor_name)
            self.ligand_ledock_mol2 = "{0}.mol2".format(self.ligand_name)
            self.absolute_receptor_ledock_pdb = self.ledock_absolute_folder_name + os.sep + self.receptor_ledock_pdb
            self.absolute_ligand_ledock_mol2 = self.ledock_absolute_folder_name + os.sep + self.ligand_ledock_mol2
            self.receptor_pybel.write("pdb", self.absolute_receptor_ledock_pdb, overwrite=True)
            self.ligand_pybel.write("mol2", self.absolute_ligand_ledock_mol2, overwrite=True)
            self.ledock_folder_exists = True

            # TODO enter the LeDock folder and process the structure for
            # docking using LePro.
            os.chdir(self.ledock_absolute_folder_name)
            command_receptor = self.ledock_path + os.sep + 'lepro_linux_x86' + \
                ' {0} '.format(self.receptor_ledock_pdb)
            os.system(command_receptor)
            self.lepro_pdb_file = 'pro.pdb'
            # Need to check whether lepro ran fine.
            print('Updated receptor with LePro\n')
            os.chdir(self.uber_dock_folder)

            self.state_data['dockSoftware']['LeDock'].update(
                {'receptor_pdb': self.receptor_ledock_pdb,
                 'ligand_mol2': self.ligand_ledock_mol2,
                 'lepro_pdb': self.lepro_pdb_file,
                 'lepro_abs_pdb': self.ledock_absolute_folder_name + os.sep + self.lepro_pdb_file,
                 'abs_receptor_pdb': self.absolute_receptor_ledock_pdb,
                 'abs_ligand_mol2': self.absolute_ligand_ledock_mol2,
                 'LeDockFolderStatus': self.ledock_folder_exists,
                 'LeDockAbsFolder': self.ledock_absolute_folder_name,
                 'LeDockFolderName': self.ledock_folder_name})
            self.save_state_data_json()

    @hlp.timeit
    def prep_LeDock_dock_run_commands(self, num_samples=10):
        '''
        Prepare the LeDock run commands and save the state data to json.
        :param num_samples:
        :return:
        '''
        try:
            if self.setup_ledock_pameters is not False:
                for LeDock_sample_num in range(num_samples):
                    self.prepLeDockCommand(LeDock_sample_num)
                # self.ledock_sim_states = self.state_data['dockSoftware']['LeDock']['simStates']
                # self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list']
                # print('No need to generate LeDock commands')
                self.save_state_data_json()
                print("LeDock command generation finished")
            else:
                print('Please setup LeDock settings')
        except Exception as e:
            print("error in runSim: ", e)
            sys.exit(0)

    def prepLeDockCommand(self, sample_num):
        '''
        Prepare the command for one LeDock run:  ./ledock_linux_x86 dock.in
        :param sample_num:
        :param pose_gen: default generate 20 poses
        :return:
        '''
        command_receptor = self.ledock_path + os.sep + 'ledock_linux_x86'
        sample_data = self.ledock_input_info[str(sample_num)]
        # Per-sample parameter file plus the .dok output to split into poses
        # (e.g. MOR_flexaid.dok); the key names below follow the simStates
        # conventions used elsewhere in this class.
        parm_name = sample_data['parmName']
        ligand_clear_dok = sample_data['ligandClearDok']
        command_to_run = "{0} {1}".format(command_receptor, parm_name)
        command_to_clean = "{0} -spli {1}".format(command_receptor, ligand_clear_dok)
        print(command_to_run)
        self.LeDock_command_run_list.append(command_to_run)
        print("Launching new Sim")
        temp_dict = {str(sample_num): {'save_run_name': self.save_run_name,
                                       'commandRun': command_to_run,
                                       'commandClean': command_to_clean,
                                       'runFinished': False}}
        self.state_data['dockSoftware']['LeDock']['simStates'].update(temp_dict)
    @hlp.timeit
    def prepVinaSim_samples(self):
        '''
        Build the vina command for every queued sample.
        :return:
        '''
        for sample_num in self.samples_run:
            self.prepVinaSampleCommand(sample_num)

    @hlp.timeit
    def prepVinaSampleCommand(self, sample_num):
        # try:
        if self.setup_box is not False:
            # print("Running Vina")
            # TODO need to think about seed
            self.save_run_name = 'vina_' + self.run_type_samples + '_' + str(sample_num)
            command_to_run = "vina --receptor {0} " \
                             "--ligand {1} " \
                             "--center_x {2} " \
                             "--center_y {3} " \
                             "--center_z {4} " \
                             "--size_x {5} " \
                             "--size_y {6} " \
                             "--size_z {7} " \
                             "--exhaustiveness {8} " \
                             "--num_modes {9} " \
                             "--seed 10 " \
                             "--log {10}.txt " \
                             "--out {11}_out.pdbqt".format(self.receptor_file, self.ligand_file,
                                                           self.x_center, self.y_center, self.z_center,
                                                           self.x_size, self.y_size, self.z_size,
                                                           self.samples_exhaust, self.num_modes,
                                                           self.save_run_name, self.save_run_name)
            print(command_to_run)
            self.command_samples_run_list.append(command_to_run)
            print("Launching new Sim")
            self.state_data_samples['simStates'].update(
                {str(sample_num): {'save_run_name': self.save_run_name,
                                   'commandRun': command_to_run,
                                   'runFinished': False}})
            print("Vina sample run command prep finished")
        else:
            print('Please setup vina box settings')
        # except Exception as e:
        #     print("error in runSim: ", e)
        #     sys.exit(0)
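
    # --- Illustrative sketch (not in the original API): the command strings
    # built above are meant to be launched as shell commands; a safer
    # equivalent with subprocess looks like this. The default argument only
    # works if the vina binary is on PATH, and the timeout value is an
    # assumption.
    @staticmethod
    def _example_run_command(command_to_run='vina --help'):
        import shlex
        import subprocess

        # split the flat command string into argv so shell=True is not needed
        args = shlex.split(command_to_run)
        completed = subprocess.run(args, capture_output=True, text=True, timeout=3600)
        print(completed.stdout)
        return completed.returncode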
    def flexaid_generate_ga_dat_parameters(self):
        '''
        Generate GA dat parameters for flexaid docking.
        :return:
        '''
        self.flexaid_ga_dat_param_template = '''# Number of chromosomes (number individuals in the population)
# Integer in interval [1-N]
NUMCHROM 500

# Number of generations
# Integer in interval [1-N]
NUMGENER 500

# Use Adaptive Genetic-Algorithm
# Value of 0 or 1
ADAPTVGA 1

# Adaptive crossover and mutation probabilities
# Floats in interval [0.0,1.0]
ADAPTKCO 0.95 0.10 0.95 0.10

# Constant crossover probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
CROSRATE 0.90

# Crossover operator
# Intragenic crossovers are possible
INTRAGEN

# Specifies that the initial population is generated randomly
POPINIMT RANDOM

# Fitness function
# Value in [LINEAR,PSHARE]
FITMODEL PSHARE

# Parameters of the shared fitness function
# Floats in interval [0.0,1000.0]
SHAREALF 4.0
SHAREPEK 5.0
SHARESCL 10.0

# Reproduction model
REPMODEL BOOM

# Fraction of population to create
# Only considered when REPMODEL is BOOM
BOOMFRAC 1.0

# Number of new individuals to generate at each generation
# Only considered when REPMODEL is STEADY
# Integer in interval [1,N-1] where N is NUMCHROM
STEADNUM 950

# Number of TOP individuals to print in console
# Integer in interval [1,N] where N is NUMCHROM
PRINTCHR 10
'''
        self.generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat'
        self.generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep + self.generate_ga_dat

        self.generate_ga_dat_object_file = open(self.generate_ga_dat_name_abs, 'w')
        self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template)
        self.generate_ga_dat_object_file.close()

        self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}})
        self.state_data['dockSoftware']['FlexAid']['GA_params'].update(
            {'generateGA_param': self.generate_ga_dat_pameters,
             'GA_DataName': self.generate_ga_dat,
             'GA_ParamTemplate': self.flexaid_ga_dat_param_template})
        # self.state_data_samples = self.state_data.copy()
        self.save_state_data_json()

    def flexaid_generate_ga_dat_parameters_dask(self):
        '''
        Generate GA dat parameters for flexaid docking for a dask run; the
        file names are returned instead of being written locally.
        :return:
        '''
        generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat'
        generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep + generate_ga_dat
        return [generate_ga_dat, generate_ga_dat_name_abs]

    def flexaid_generate_config_input_dask(self):
        '''
        Generate the flexaid config input file.
        FlexAid is very strict about spaces.
        :return:
        '''
        flexaid_config_input_template = '''# Optimization method (genetic-algorithms)
METOPT GA

# The variation in degrees for flexible dihedrals of the ligand
# Float in interval [1.0-30.0]
VARDIH 5.0

# The variation in degrees for the anchor angle of the ligand
# Float in interval [1.0-30.0]
VARANG 5.0

# Defines the grid spacing of the binding-site
# Float in interval [0.1,1.0]
SPACER 0.375

# Exclude hetero groups in the target (water,metal,modified amino acids,cofactors,ligands)
# To exclude these groups, uncomment the next line
#EXCHET

# Include water molecules in the target (always removed by default)
# Only considered if EXCHET is disabled
# To include water molecules, uncomment the next line
#INCHOH

# Permeability allowed between atoms
# Float in interval [0.0,1.0] from fully permeable to no permeability
PERMEA 0.9

# Side-chain rotamer permeability
# Float in interval [0.0,1.0] from fully permeable to no permeability
ROTPER 0.8

# Use instances of side-chain rotamers from the Rotamer Library
#ROTOBS

# Use Vcontacts in the calculations of surfaces in contact
COMPLF VCT

# Do not consider intramolecular interactions
NOINTR

# Vcontacts indexing
VINDEX

# Clustering RMSD cutoff
CLRMSD 2.0

# Number of results/docking poses to output
MAXRES 20

# Only output scored atoms in the final results
# Uncomment the next line if you wish to obtain the whole complex
SCOOUT

# Only calculate the CF for ligand atoms despite including flexible side-chains
#SCOLIG

# Ends reading of CONFIG file
ENDINP
'''
        final_str = ''

        # Specify the processed target file to use.
        pdbnam = 'PDBNAM ' + '{0}\n\n'.format(self.receptor_flexaid_mol2)
        # Specify the processed ligand file to use;
        # BTN.inp has the unique RESNUMC identifier LIG9999A.
        inplig = 'INPLIG ' + '{0}.inp\n\n'.format(self.ligand_flexaid_initials)
        # Specify to use one or multiple cleft(s) as binding-site.
        rgnopt_locclf = 'RNGOPT LOCCLF ' + 'global_binding_site.pdb\n\n'
        # Translational DOF of the ligand (-1).
        optimz1 = 'OPTIMZ 9999 {0} -1\n\n'.format(self.flexaid_res_chain)
        # Rotational DOF of the ligand (0).
        optimz2 = 'OPTIMZ 9999 {0} 0\n\n'.format(self.flexaid_res_chain)

        # Add one extra OPTIMZ line for each flexible bond of the ligand:
        # as many lines as there are FLEDIH entries in Processed_files/BTN.inp.
        # In our example, Biotin has 5 flexible bonds.
        flexible_bonds_data = open(self.flexaid_absolute_processed_files_folder + os.sep +
                                   '{0}.inp'.format(self.ligand_flexaid_initials), 'r')
        flexible_bonds_data_text = flexible_bonds_data.read()
        flexible_bonds_data.close()
        flexible_bonds_data_text_list = flexible_bonds_data_text.split('\n')

        flexible_index_list_phrases = []
        flexible_index_list = []
        for i in flexible_bonds_data_text_list:
            if 'FLEDIH' in i:
                print(i)
                temp = i.split()
                flexible_index_list.append(temp[-1])
                flexible_index_list_phrases.append(
                    'OPTIMZ 9999 {0} {1}\n'.format(self.flexaid_res_chain, temp[-1]))

        final_str += pdbnam
        final_str += inplig
        final_str += rgnopt_locclf
        final_str += optimz1
        final_str += optimz2
        for y in flexible_index_list_phrases:
            final_str += y
        final_str += '\n'
        rmsdst = 'RMSDST ' + '{0}_ref.pdb\n\n'.format(self.ligand_flexaid_initials)
        final_str += rmsdst
        final_str += flexaid_config_input_template

        generate_config_input_file = 'CONFIG_' + self.receptor_name + '-' + self.ligand_name + '.inp'
        return [generate_config_input_file, final_str]

    def load_samples_state_data_json(self, filename):
        '''
        :param filename: load json state data
        :return:
        '''
        self.load_state_called_samples = True
        print(os.path.abspath(__file__))
        self.state_data_samples = json.load(open(filename, "r"))
        # os.chdir('HSL_exhaustiveness')

        self.receptor_file = self.state_data_samples['receptorFile']
        self.ligand_file = self.state_data_samples['ligandFile']
        self.exhaustiveness = self.state_data_samples['exhaustivenessList']
        self.samples_run = self.state_data_samples['samplesList']
        self.folder_path = self.state_data_samples['folderPath']
        self.run_type = self.state_data_samples['runType']
        self.molecule_name = self.state_data_samples['molName']
        self.receptor_name = self.state_data_samples['receptorName']
        # TODO test
        self.samples_exhaust = self.state_data_samples['samples_exhaust']
        self.sim_folder_run_samples = self.state_data_samples['simRunFolder']  # .split('/')[-1]
        self.directories_samples = self.state_data_samples['directory']
        self.setup_box = self.state_data_samples['setup']
        self.folder_exists = self.state_data_samples['folderCreated']

        self.x_center = self.state_data_samples['boxSettings']['center_x']
        self.y_center = self.state_data_samples['boxSettings']['center_y']
        self.z_center = self.state_data_samples['boxSettings']['center_z']
        self.x_size = self.state_data_samples['boxSettings']['size_x']
        self.y_size = self.state_data_samples['boxSettings']['size_y']
        self.z_size = self.state_data_samples['boxSettings']['size_z']
        self.num_modes = self.state_data_samples['boxSettings']['numModes']
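
    # --- Illustrative sketch (not in the original API): the loop in
    # flexaid_generate_config_input_dask above emits one OPTIMZ line per
    # FLEDIH entry of the processed ligand .inp file. The inp_text below is a
    # made-up minimal excerpt with two FLEDIH entries; residue number 9999
    # and chain 'A' mirror the RESNUMC convention used in this module.
    @staticmethod
    def _example_fledih_to_optimz(inp_text='FLEDIH 1\nFLEDIH 2\n', res_chain='A'):
        optimz_lines = []
        for line in inp_text.split('\n'):
            if 'FLEDIH' in line:
                bond_index = line.split()[-1]
                optimz_lines.append('OPTIMZ 9999 {0} {1}\n'.format(res_chain, bond_index))
        return optimz_lines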
Create MoleculeObject", "worker in get_worker_free: # preped = get_worker_free[worker]['preped'] # total_free_cores += preped['freeCores'] if prep_g_mmpbsa", "print('This success ---> ', i) except Exception as error: print('error is ', error)", "abs_folder + os.sep + results_dask['contrib_apol_filename'] out_mem = results_dask['contrib_apol_mem'] out_file = open(out_name, 'w') out_file.write(out_mem)", "self.output_ligand_rdock.close() self.ledock_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + 'LeDock' self.ledock_absolute_folder_name", "<NAME> # # <!-------------------------------------------------------------------------- # # Copyright (c) 2016-2019,<NAME>. # All rights reserved.", "run_dask_tools.check_free_resources(worker_status) # # # test = 1 # # total_free_cores = 0 #", "= 'PDBNAM ' + '{0}\\n\\n'.format( self.receptor_flexaid_mol2) # Specify the processed ligand file to", "uber dock protocol for LeDock, rDock,FlexAid, Vina :return: ''' current_pid = multiprocessing.current_process().pid print(\"Main", "self.setup_box = self.state_data_samples['setup'] self.folder_exists = self.state_data_samples['folderCreated'] self.x_center = self.state_data_samples['boxSettings']['center_x'] self.y_center = self.state_data_samples['boxSettings']['center_y'] self.z_center", "run_g_mmpbsa = [] run_mmpbsa_queue = [] # Prepare outputs import copy self.before_dask =", "pass' % (n)) @hlp.timeit def prepVinaSampleCommand(self, sample_num): # try: if self.setup_box is not", "\"vina --receptor {0} \" \\ \"--ligand {1} \" \\ \"--center_x {2} \" \\", "buggy workstation_dir = original_get_worker_free[curr_worker_id]['preped']['workerDir'] workstation_freemem = workstation_preped_temp['freeMemory'] workstation_freecpu = workstation_preped_temp['freeCores'] curr_item_prog = curr_item['Program']", "print(i) time.sleep(1) # Delay for 1 sec print('Ok %s secs have pass' %", "self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates) except: print('G_mmpbsa is empty verify yolo') # # test = 1 #", "self.state_data_samples['boxSettings']['center_y'] self.z_center = self.state_data_samples['boxSettings']['center_z'] self.x_size = self.state_data_samples['boxSettings']['size_x'] self.y_size = self.state_data_samples['boxSettings']['size_y'] self.z_size = self.state_data_samples['boxSettings']['size_z']", "<MoleculeObject>`. 
# ------------------------------------------------------------------------
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above
#     copyright notice, this list of conditions and the following
#     disclaimer in the documentation and/or other materials provided
#     with the distribution.
#   * Neither the name of the molmolpy Developers nor the names of any
#     contributors may be used to endorse or promote products derived
#     from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------

import os
import sys
import json
import math
import time
import random
import itertools
import multiprocessing

import mdtraj as md
import matplotlib
import seaborn as sns

# assumed import paths for the molmolpy helpers used below
from molmolpy.utils import folder_utils
from molmolpy.utils import helper as hlp

color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold', 'darkorange'])

matplotlib.style.use('ggplot')
sns.set(style="darkgrid")

low_seed = 1
high_seed = 999999999

mgltools_utilities = '/home/john1990/MGLTools-1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24'
class GMMPBSAObject(object):
    '''
    Molecule object loading of pdb and pdbqt file formats, which is then
    converted to a pandas dataframe.

    Create a MoleculeObject by parsing a pdb or pdbqt file. Two parsers can
    be used: 1. molmolpy, 2. pybel. Stores molecule information in a pandas
    dataframe as well as a numpy list.
    Read more in the :ref:`User Guide <MoleculeObject>`.

    Parameters
    ----------
    filename : str, optional
        Path of the pdb or pdbqt file to parse.

    Examples
    --------
    >>> EPI_folder = '/media/Work/MEGA/Programming/StressHormones/dock_EPI'
    >>> EPI_samples = '/media/Work/MEGA/Programming/StressHormones/'
    >>> receptor_file = EPI_folder + os.sep + 'centroid.pdb'
    >>> ligand_file = EPI_folder + os.sep + 'EPI.pdb'
    >>> molname = 'EPI'
    >>> receptor_name = 'LasR'
    >>> run_type = 'vina_sample'
    >>> EPI_uber_dock = uber_docker.UberDockerObject(receptor_file, ligand_file, '.',
    ...                                              molname=molname, receptor_name=receptor_name)
    >>> EPI_uber_dock.prepare_uber_dock_protocol()
    >>> EPI_uber_dock.run_uber_dock_protocol()

    or, for step-by-step preparation:

    >>> EPI_uber_dock.calculate_max_radius_from_com()
    >>> EPI_uber_dock.calculate_cube_edges()
    >>> EPI_uber_dock.calculate_box_edges_from_com()
    >>> EPI_uber_dock.prepare_uber_docker()
    >>> # This is for rDock, and it works, so comment this part for a while
    >>> EPI_uber_dock.prepare_rdock_settings()
    >>> EPI_uber_dock.generate_rdock_cavity()
    >>> # This is for Autodock vina
    >>> EPI_uber_dock.set_up_Vina_Box()
    >>> EPI_uber_dock.prepare_Vina_run()
    >>> EPI_uber_dock.prepVinaSim_uberDock()
    >>> EPI_uber_dock.runVinaSim_uber()
    >>> LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True)
    >>> # LasR_MOR_mmpbsa_calc.run_dask_docking(client)
    '''

    def __init__(self, traj, topology, tpr_file, mdp_file, index_file,
                 first_index, second_index,
                 molname='Unknown', receptor_name='Unknown', folder_path='.',
                 job_name='Unknown', load_state_file=None):
        self.load_state_file = load_state_file

        if load_state_file is not None:
            self.load_state_data_json(self.load_state_file)
        else:
            print('G_MMPBSA Object created')
            self.trajectory_file = traj
            self.topology_file = topology
            self.tpr_file = tpr_file
            self.mdp_file = mdp_file
            self.index_file = index_file
            self.first_index = first_index
            self.second_index = second_index

            self.folder_path = folder_path
            self.command_run_list = []
            self.command_samples_run_list = []

            self.molecule_name = molname
            self.ligand_name = molname
            self.receptor_name = receptor_name

            self.run_type = 'g_mmpbsa'
            self.state_data = {}
            self.state_data_samples = {}

            self.g_mmpbsa_run_finished = False
            self.g_mmpbsa_sim_states = {'simStates': {}}
            self.objects_loaded = False
            self.g_mmpbsa_prepared = False

            # This part needs clarification: build the mdtraj objects from
            # the original data before any transformation.
            self.prep_mdtraj_object()
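    # A minimal construction sketch. The file names below are illustrative,
    # not taken from the original run scripts: the object is wired up from a
    # GROMACS trajectory plus the g_mmpbsa inputs, and a previous session can
    # be resumed by pointing load_state_file at the JSON state written by
    # save_state_data_json():
    #
    #     calc = GMMPBSAObject('traj.xtc', 'top.pdb', 'topol.tpr',
    #                          'mmpbsa.mdp', 'index.ndx',
    #                          first_index=1, second_index=13,
    #                          molname='MOR', receptor_name='LasR')
    #     calc.prepare_g_mmpbsa()
    #
    #     resumed = GMMPBSAObject(None, None, None, None, None, None, None,
    #                             load_state_file='LasR_MOR_g_mmpbsa.json')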
    # Add receptor name
    def set_molecule_name(self, mol_name):
        self.molecule_name = mol_name

    def set_receptor_name(self, receptor_name):
        self.receptor_name = receptor_name

    def set_mgltools_path(self, path):
        print('MGLTools path is set to ', path)
        self.mgltools_utilities = path

    def set_flexaid_path(self, path):
        print('FlexAid path is set to ', path)
        self.flexaid_path = path

    def set_ledock_path(self, path):
        print('LeDock path is set to ', path)
        self.ledock_path = path

    def prep_mdtraj_object(self):
        '''
        Prepare receptor mdtraj object:
        get the mdtraj topology and save it as a pandas dataframe,
        then calculate the pdb receptor center of mass.
        :return:
        '''
        self.trajectory_mdtraj = md.load_xtc(self.trajectory_file, top=self.topology_file)
        self.trajectory_mdtraj_topology = self.trajectory_mdtraj.topology
        self.trajectory_mdtraj_topology_dataframe, _ = self.trajectory_mdtraj_topology.to_dataframe()
        self.objects_loaded = True

    def get_uber_g_mmpbsa_run_folder_name(self):
        curr_folder = os.getcwd()
        return curr_folder + os.sep + self.run_folder_name

    def get_samples_run_folder_name(self):
        curr_folder = os.getcwd()
        print("Yippie yi kay", curr_folder)
        return curr_folder + os.sep + self.run_folder_name_samples
    def save_state_data_json(self, filedata=None, filename=None):
        '''
        :param filename: Saves state file
        :return:
        '''
        # filename = self.sim_folder_run + os.sep + self.receptor_name + '_' + self.molecule_name + '.json'
        if filename is None and filedata is None:
            filename = self.absolute_json_state_file
            filedata = self.state_data
        # assumption: the state dict is serialized with the stdlib json writer
        json.dump(filedata, open(filename, 'w'), sort_keys=True, indent=4)

    def load_state_data_json(self, filename):
        '''
        :param filename: load json state data
        :return:
        '''
        self.load_state_called = True
        print(os.path.abspath(__file__))
        self.state_data = json.load(open(filename, "r"))

        self.trajectory_file = self.state_data['trajectoryFile']
        self.mdp_file = self.state_data['mdpFile']
        self.tpr_file = self.state_data['tprFile']
        self.index_file = self.state_data['indexFile']
        self.folder_path = self.state_data['folderPath']
        self.run_type = self.state_data['runType']
        self.molecule_name = self.state_data['molName']
        self.receptor_name = self.state_data['receptorName']
        self.sim_folder_run = self.state_data['simRunFolder']  # .split('/')[-1]
        self.directories = self.state_data['directory']
        self.folder_exists = self.state_data['folderCreated']
        self.absolute_json_state_file = self.state_data['absoluteJsonStates']
        self.g_mmpbsa_folder = self.state_data['RunFolder']
        self.json_state_file = self.state_data['jsonStates']

        # Try to restore the g_mmpbsa preparation state
        try:
            self.mdtraj_frames = self.state_data['energySoftware']['g_mmpbsa']['frames']
            self.mdtraj_parts = self.state_data['energySoftware']['g_mmpbsa']['parts']
            self.file_save_list = self.state_data['energySoftware']['g_mmpbsa']['fileList']
            self.abs_file_save_list = self.state_data['energySoftware']['g_mmpbsa']['absFileList']
            self.simStates = self.state_data['energySoftware']['g_mmpbsa']['simStates']
            self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare']
        except Exception:
            print('G_mmpbsa is empty verify yolo')

        # LeDock settings part (kept from the docking variant of this loader)
        # self.ledock_data = self.state_data['dockSoftware']['LeDock']
        # try:
        #     self.setup_ledock_pameters = self.ledock_data['setup_LeDock']
        #     self.ledock_num_samples = self.ledock_data['num_samples']
        #     self.ledock_input_info = self.ledock_data['LeDockInputInfo']
        #     self.param_ledock_template = self.ledock_data['paramFull']
        # except:
        #     print('LeDock setting part is empty verify yolo')
        # try:
        #     self.ledock_param_title = self.ledock_data['LeDock_params']['title']
        #     self.receptor_file_ledock = self.ledock_data['LeDock_params']['receptorFile']
        #     self.ledock_rmsd = self.ledock_data['LeDock_params']['LeDockRMSD']
        #     self.ledock_xmin = self.ledock_data['LeDock_params']['xmin']
        #     self.ledock_xmax = self.ledock_data['LeDock_params']['xmax']
        #     self.ledock_ymin = self.ledock_data['LeDock_params']['ymin']
        #     self.ledock_ymax = self.ledock_data['LeDock_params']['ymax']
        #     self.ledock_zmin = self.ledock_data['LeDock_params']['zmin']
        #     self.ledock_zmax = self.ledock_data['LeDock_params']['zmax']
        # except:
        #     print('LeDock_params is empty verify yolo')
        # try:
        #     self.LeDock_sim_states = self.state_data['dockSoftware']['LeDock']['simStates']
        #     self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list']
        # except:
        #     print('LeDock_params simStates is empty verify yolo')

    def load_samples_state_data_json(self, filename):
        '''
        :param filename: load json state data for the samples run
        :return:
        '''
        self.load_state_called_samples = True
        print(os.path.abspath(__file__))
        self.state_data_samples = json.load(open(filename, "r"))

        self.receptor_file = self.state_data_samples['receptorFile']
        self.ligand_file = self.state_data_samples['ligandFile']
        self.folder_path = self.state_data_samples['folderPath']
        self.run_type = self.state_data_samples['runType']
        self.molecule_name = self.state_data_samples['molName']
        self.receptor_name = self.state_data_samples['receptorName']
        # TODO test
        self.samples_exhaust = self.state_data_samples['samples_exhaust']
        self.sim_folder_run_samples = self.state_data_samples['simRunFolder']  # .split('/')[-1]
        self.directories_samples = self.state_data_samples['directory']
        self.folder_exists = self.state_data_samples['folderCreated']

        self.x_center = self.state_data_samples['boxSettings']['center_x']
        self.y_center = self.state_data_samples['boxSettings']['center_y']
        self.z_center = self.state_data_samples['boxSettings']['center_z']
        self.x_size = self.state_data_samples['boxSettings']['size_x']
        self.y_size = self.state_data_samples['boxSettings']['size_y']
        self.z_size = self.state_data_samples['boxSettings']['size_z']
        self.num_modes = self.state_data_samples['boxSettings']['numModes']  # key name assumed

    # This might need to get modified
    def find_sample_files(self, folder):
        try:
            VIP = []
            for dirname, dirnames, filenames in os.walk(folder):
                for i in filenames:
                    # print i
                    if 'out' in i:
                        VIP.append(i)
                    # This is not necessary since info is inside pdbqt file
                    # elif 'vina_sample_' in i:
                    #     VIP.append(i)
            return VIP
        except Exception as e:
            print("error in find_sample_files: ", e)

    def find_sample_folders(self, folder_path='.', dir_name='vina_sample'):
        try:
            dir_names = []
            for dirname, dirnames, filenames in os.walk(folder_path):
                # print(dirname, '-')
                if dir_name in dirname:
                    dir_names.append(dirname)
            # print sorted(dir_names)
            return sorted(dir_names)
        except Exception as e:
            print("Problem with finding folders : ", e)
            sys.exit(0)
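    # The folder bookkeeping above leans on molmolpy's folder_utils. As a
    # rough sketch of what those helpers are assumed to do (behaviour
    # inferred from their call sites here, not from the original module),
    # something like the following is enough to run this class standalone:

    @staticmethod
    def _folder_utils_sketch(base_path, folder_name):
        '''Return sub-folders of base_path whose path contains folder_name,
        mirroring how folder_utils.find_folder_in_path is used above.'''
        matches = []
        for dirpath, dirnames, filenames in os.walk(base_path):
            for d in dirnames:
                full = os.path.join(dirpath, d)
                if folder_name in full:
                    matches.append(full)
        return sorted(matches)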
    # TODO this part needs to be thought out
    ####################################################################################################################
    def prepare_samples_collection_run(self, standard_exhaust=128,
                                       num_samples_run=100,
                                       run_type='samples_run'):
        if self.setup_box is False:
            print('Please setup simulation box')
            sys.exit(0)

        self.run_type_samples = run_type
        self.prep_samples_run = True
        self.samples_exhaust = standard_exhaust
        self.samples_run = list(range(1, num_samples_run + 1))

        self.run_folder_name_samples = (self.receptor_name + '_' + self.molecule_name
                                        + '_' + self.run_type_samples)
        self.sim_folder_run_samples = self.folder_path + os.sep + self.run_folder_name_samples

        # Create folder, don't forget. Exhaustiveness is fixed for all samples.
        self.directories_samples = folder_utils.find_folder_in_path(self.folder_path,
                                                                    self.run_folder_name_samples)
        print('TADA ', self.directories_samples)
        self.json_samples_state_file = (self.sim_folder_run_samples + os.sep
                                        + self.receptor_name + '_' + self.molecule_name
                                        + '_' + self.run_type_samples + '.json')

        # This will hold information about run states
        if len(self.directories_samples) == 0:
            print('Creating folder for vina samples run\n')
            print('Vina run folder ', self.sim_folder_run_samples)
            folder_utils.create_folder(self.sim_folder_run_samples)
            self.state_data_samples.update({'samplesList': self.samples_run,
                                            'folderPath': self.folder_path,
                                            'runType': self.run_type_samples,
                                            'molName': self.molecule_name,
                                            'receptorName': self.receptor_name,
                                            'simRunFolder': self.sim_folder_run_samples,
                                            'directory': self.directories_samples,
                                            'samples_exhaust': self.samples_exhaust,
                                            'simStates': {}})
            self.prepVinaSim_samples()
            self.save_state_data_json(filedata=self.state_data_samples,
                                      filename=self.json_samples_state_file)
            self.load_state_called_samples = False
            self.prep_sample_run = True
        else:
            self.load_state_file_samples = self.json_samples_state_file
            self.load_state_called_samples = True
            self.load_samples_state_data_json(self.load_state_file_samples)
            self.prep_sample_run = True

    def prepVinaSim_samples(self):
        # assumption: the per-sample command prep is consolidated into one loop
        for sample_num in self.samples_run:
            self.prep_vina_sample_command(sample_num)
        print("Vina sample run command prep finished")

    def prep_vina_sample_command(self, sample_num):
        '''
        Prepare the vina command line for one sample run (helper name assumed).
        '''
        if self.setup_box is not False:
            self.save_run_name = (self.receptor_name + '_' + self.run_type_samples
                                  + '_' + str(sample_num))
            # assumption: placeholders {10}/{11} feed --out and --log
            command_to_run = "vina --receptor {0} " \
                             "--ligand {1} " \
                             "--center_x {2} " \
                             "--center_y {3} " \
                             "--center_z {4} " \
                             "--size_x {5} " \
                             "--size_y {6} " \
                             "--size_z {7} " \
                             "--exhaustiveness {8} " \
                             "--num_modes {9} " \
                             "--out {10}_out.pdbqt " \
                             "--log {11}.log".format(self.receptor_file,
                                                     self.ligand_file,
                                                     self.x_center,
                                                     self.y_center,
                                                     self.z_center,
                                                     self.x_size,
                                                     self.y_size,
                                                     self.z_size,
                                                     self.samples_exhaust,
                                                     self.num_modes,
                                                     self.save_run_name,
                                                     self.save_run_name)
            print(command_to_run)
            self.command_samples_run_list.append(command_to_run)
            print("Launching new Sim")
            self.state_data_samples['simStates'].update(
                {str(sample_num): {'save_run_name': self.save_run_name,
                                   'commandRun': command_to_run,
                                   'runFinished': False}})
            # try:
            #     os.system(command_to_run)
            # except KeyboardInterrupt:
            #     # quit
            #     sys.exit()
        else:
            print('Please setup vina box settings')
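    # The per-sample runs differ only in their random seed. The module-level
    # low_seed/high_seed bounds above are assumed to feed a draw like the one
    # below (vina exposes this as --seed, LeDock takes it in its parameter
    # file):

    @staticmethod
    def _draw_sample_seed(rng=None):
        '''Draw one docking seed in [low_seed, high_seed]; pass a seeded
        random.Random for reproducible sample sets.'''
        rng = rng or random.Random()
        return rng.randint(low_seed, high_seed)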
    ##############################################################################
    # FlexAid part
    ##############################################################################
    def flexaid_generate_ga_dat_parameters(self):
        '''
        Generate GA dat parameters for flexaid docking
        :return:
        '''
        self.flexaid_ga_dat_param_template = '''# Number of chromosomes (number of individuals in the population)
# Integer in interval [1-N]
NUMCHROM 500
# Intragenic crossovers are possible
INTRAGEN
# Specifies that the initial population is generated randomly
POPINIMT RANDOM
# Fitness function
FITMODEL PSHARE
# Parameters of the shared fitness function
# Floats in interval [0.0,1000.0]
SHAREALF 4.0
SHAREPEK 5.0
SHARESCL 10.0
# Reproduction model
# Fraction of the population to create
# Only considered when REPMODEL is BOOM
BOOMFRAC 1.0
# Number of new individuals to generate at each generation
# Only considered when REPMODEL is STEADY
# Integer in interval [1,N-1] where N is NUMCHROM
# Adaptive genetic algorithm
# Value of 0 or 1
ADAPTVGA 1
# Adaptive crossover and mutation probabilities
ADAPTKCO 0.95 0.10 0.95 0.10
# Constant crossover probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
CROSRATE 0.90
# Constant mutation probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
MUTARATE 0.10
# Crossover operator
# ...
# Print chromosomes
# Integer in interval [1,N] where N is NUMCHROM
PRINTCHR 10
'''
        self.generate_ga_dat_pameters = True  # flag recorded in the state file; value assumed

        self.generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat'
        self.generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep + self.generate_ga_dat
        self.generate_ga_dat_object_file = open(self.generate_ga_dat_name_abs, 'w')
        self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template)
        self.generate_ga_dat_object_file.close()

        self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}})
        self.state_data['dockSoftware']['FlexAid']['GA_params'].update(
            {'generateGA_param': self.generate_ga_dat_pameters,
             'GA_DataName': self.generate_ga_dat,
             'GA_DATA_Abs': self.generate_ga_dat_name_abs,
             'GA_ParamFull': self.flexaid_ga_dat_param_template})
        self.state_data_samples = self.state_data.copy()
        self.save_state_data_json()

    ####################################################################################################################
    def flexaid_generate_ga_dat_parameters_dask(self):
        '''
        Generate GA dat parameters for flexaid docking (dask variant):
        only the file name is produced here, the write happens on the worker.
        :return:
        '''
        generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat'
        generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep + generate_ga_dat
        return [generate_ga_dat, ]

    def flexaid_generate_config_input_dask(self):
        '''
        Generate flexaid config input file.
        FlexAid is very strict about its input format.
        :return:
        '''
        flexaid_config_input_template = '''# Permeability allowed between atoms
# Float in interval [0.0,1.0] from fully permeable to no permeability
PERMEA 0.9
# Permeability for side-chain rotamer acceptance
# Float in interval [0.0,1.0] from fully permeable to no permeability
ROTPER 0.8
# Solvent term penalty
# When the value is 0.0 the solvent interactions are derived from the interaction matrix
# Float in interval [-200.0,200.0]
SLVPEN 0.0
# Use instances of side-chain conformers
# Float in interval [0.0-1.0]
DEECLA 0.8
# The variation in degrees for the anchor dihedral of the ligand
VARANG 5.0
# The variation in degrees for flexible dihedrals of the ligand
# ...
# Grid spacer of the binding-site
# Float in interval [0.1,1.0]
SPACER 0.375
# To exclude hetero groups, uncomment the next line
#EXCHET
# Include water molecules in the target (always removed by default)
# To include water molecules, uncomment the next line
#INCHOH
# Vcontacts plane definition
# Value in [B,R,X] for Bissecting, Radical and Extended radical plane
# See McConkey et al. (2002) Bioinformatics. 18(10); 1365-1373
VCTPLA R
# Use normalized surfaces in contacts
NORMAR
# Define the RMSD cutoff between clusters
# Float in interval [0.5,3.0]
CLRMSD 2.0
# Number of results/docking poses to output
MAXRES 20
# Only output scored atoms in the final results
# Use Vcontacts in the calculations of surfaces in contact
COMPLF VCT
# Do not consider intramolecular interactions
NOINTR
# Including flexible side-chains
#SCOLIG
# Ends reading of CONFIG file
ENDINP
'''
        final_str = ''''''
        # Specify the processed target file (attribute name assumed)
        pdbnam = 'PDBNAM {0}\n\n'.format(self.receptor_flexaid_processed_pdb)
        # Specify the processed ligand file with the unique RESNUMC identifier LIG9999A
        inplig = 'INPLIG ' + '{0}.inp\n\n'.format(self.ligand_flexaid_initials)
        # Specify to use one or multiple cleft(s) as binding-site
        rgnopt_locclf = 'RNGOPT LOCCLF ' + 'global_binding_site.pdb\n\n'
        # The ligand is residue number 9999 and chain A
        # Translational DOF of the ligand (-1)
        optimz1 = 'OPTIMZ 9999 {0} -1\n'.format(self.flexaid_res_chain)
        # Rotational DOF of the ligand (0)
        optimz2 = 'OPTIMZ 9999 {0} 0\n'.format(self.flexaid_res_chain)

        # The allowable flexible bonds are listed as FLEDIH lines in the
        # processed ligand file; Biotin, for example, has 5 flexible bonds.
        flexible_bonds_data = open(self.flexaid_absolute_processed_files_folder + os.sep +
                                   '{0}.inp'.format(self.ligand_flexaid_initials), 'r')
        flexible_index_list = []
        for i in flexible_bonds_data:
            if 'FLEDIH' in i:
                print(i)
                temp = i.split(' ')
                print(temp)
                flex_index = temp[-2]
                flexible_index_list.append(int(flex_index))
        flexible_bonds_data.close()

        optimz_flex = ''
        for y in flexible_index_list:
            # per-bond DOF line, exact form assumed from optimz1/optimz2 above
            optimz_flex += 'OPTIMZ 9999 {0} {1}\n'.format(self.flexaid_res_chain, y)

        rmsdst = 'RMSDST ' + '{0}_ref.pdb\n\n'.format(self.ligand_flexaid_initials)

        final_str += pdbnam
        final_str += inplig
        final_str += rgnopt_locclf
        final_str += optimz1
        final_str += optimz2
        final_str += optimz_flex
        final_str += rmsdst
        final_str += flexaid_config_input_template

        generate_config_input_file = ('CONFIG_' + self.receptor_name + '-'
                                      + self.ligand_name + '.inp')  # extension assumed
        generate_config_input_abs = self.flexaid_absolute_input_folder + os.sep + generate_config_input_file
        config_object_file = open(generate_config_input_abs, 'w')
        config_object_file.write(final_str)
        config_object_file.close()
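    # The FLEDIH parsing above depends on the exact column layout of the
    # FlexAid .inp ligand file. A self-contained sketch of that step, with
    # the line layout assumed from the parsing code above (note that a
    # trailing space makes the index land in the next-to-last split field):

    @staticmethod
    def _parse_fledih_indexes(inp_text):
        '''Collect flexible-bond indexes from FLEDIH records.'''
        indexes = []
        for line in inp_text.splitlines():
            if 'FLEDIH' in line:
                fields = line.split(' ')
                indexes.append(int(fields[-2]))
        return indexes

    # e.g. _parse_fledih_indexes('FLEDIH  4 \nFLEDIH  7 \n') -> [4, 7]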
"ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE", "The variation in degrees for the anchor dihedral of the ligand # Float", "'absFileSave':abs_file_save, 'firstIndex':self.first_index, 'secondIndex':self.second_index, 'indexFile':self.index_file, 'mdpFile':self.mdp_file, 'tprFile':self.tpr_file}) energy_mm = 'energy_MM_{0}.xvg'.format(i) polar = 'polar_{0}.xvg'.format(i) apolar", "print('No need to generate LeDock commands') # self.prep_LeDock_run = True # except: #", "matplotlib.style.use('ggplot') sns.set(style=\"darkgrid\") low_seed = 1 high_seed = 999999999 mgltools_utilities = '/home/john1990/MGLTools-1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24' class GMMPBSAObject(object):", "# print('Last Check of submitted jobs') while len(finished_jobs) != job_quantity: finished_jobs, finished_jobs_dict =", "self.state_data_samples = self.state_data.copy() self.save_state_data_json() # TODO this part needs to be thought out", "the shared fitness function # Floats in interval [0.0,1000.0] SHAREALF 4.0 SHAREPEK 5.0", "way folder is buggy workstation_dir = original_get_worker_free[curr_worker_id]['preped']['workerDir'] workstation_freemem = workstation_preped_temp['freeMemory'] workstation_freecpu = workstation_preped_temp['freeCores']", "= json.load(open(filename, \"r\")) # os.chdir('HSL_exhaustiveness') self.trajectory_file = self.state_data['trajectoryFile'] self.mdp_file = self.state_data['mdpFile'] self.tpr_file =", "' + '{0}_ref.pdb\\n\\n'.format( self.ligand_flexaid_initials) final_str += rmsdst final_str += flexaid_config_input_template generate_config_input_file = 'CONFIG_'", ")] = results[key] # self.state_data['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key] self.before_dask['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key] # results_dask =", "############################################################################## def flexaid_generate_ga_dat_parameters(self): ''' Generate GA dat parameters for flexaid docking :return: '''", "set_molecule_name(self, mol_name): self.molecule_name = mol_name def set_receptor_name(self, receptor_name): self.receptor_name = receptor_name # This", "MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT", "leDock # if prep_g_mmpbsa is True: # # self.prepare_uber_docker() # self.prepare_ledock_settings() # self.prep_LeDock_dock_run_commands()", "{}}) data_pre = {} data_pre.update({'tprName':tpr_filename, 'tprMem':tpr_mem}) data_pre.update({'mdpName':mdp_filename, 'mdpMem':mdp_mem}) data_pre.update({'indexName':index_filename, 'indexMem':index_mem}) self.dask_prep = data_pre", "thought out #################################################################################################################### def flexaid_generate_ga_dat_parameters_dask(self): ''' Generate GA dat parameters for flexaid docking", "+ '.dok' # -spli MOR_flexaid.dok command_to_clean = \"{0} -spli {1}\".format(command_receptor, ligand_clear_dok) print(command_to_run) self.LeDock_command_run_list.append(command_to_run)", "than 16 jobs_running = len(submitted_jobs_dask) - len(finished_jobs) max_jobus = max_jobs_to_run # g_mmpbsa part", "state print('-------') if curr_index == 0 and len(submitted_jobs_dask) == 1: curr_index = 0", "self.mdtraj_parts}) # self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list}) # self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list}) # self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates) except: print('G_mmpbsa is empty", "# except: # print('LeDock_params simStates is empty verify yolo') # # test =", "part needs to be thought out #################################################################################################################### def flexaid_generate_ga_dat_parameters_dask(self): ''' Generate GA dat", "inplig final_str += rgnopt_locclf final_str += optimz1 final_str += optimz2 for y in", "# with the distribution. # * Neither the name of the molmolpy Developers", "# self.absolute_path = os.path.abspath(filename) self.load_state_called_samples = True print(os.path.abspath(__file__)) self.state_data_samples = json.load(open(filename, \"r\")) #", "#This is for rDock, and it works so comment this part for a", "= self.state_data['energySoftware'][prog] abs_folder = self.g_mmpbsa_folder # original_data['AbsFolder'] out_name = abs_folder + os.sep +", "workstation_preped_temp['workerAddress'] # This way folder is buggy workstation_dir = original_get_worker_free[curr_worker_id]['preped']['workerDir'] workstation_freemem = workstation_preped_temp['freeMemory']", "div_traj = math.ceil(traj_len/total_free_cores) # select_indexes = list(range(total_free_cores)) # Maximum parallel #div_traj = math.trunc(traj_len/total_free_cores)", "self.json_state_file, 'runType': self.run_type, 'molName': self.molecule_name, 'receptorName': self.receptor_name, 'simRunFolder': self.sim_folder_run, 'RunFolder': self.g_mmpbsa_folder, 'absoluteJsonStates': self.absolute_json_state_file,", "# # test = 1 # # total_free_cores = 0 # # for", "yolo') # # try: # self.ledock_param_title = self.ledock_data['LeDock_params']['title'] # self.rdock_title = self.ledock_data['LeDock_params']['title'] #", "self.state_data['dockSoftware'][prog]['simStates'][str(sample_num )] = results[key] # self.state_data['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key] self.before_dask['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key] # results_dask", "uberDocker folder \\n') print(self.ledock_directories) folder_utils.create_folder(self.ledock_absolute_folder_name) test = 1 self.receptor_ledock_pdb = \"{0}.pdb\".format(self.receptor_name) self.ligand_ledock_mol2 =", "# Number of new 
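    # The commands above are executed with os.system(). A more robust
    # variant (a sketch, not what the original code does) is subprocess.run,
    # which surfaces the return code and the captured output of lepro/LeDock:

    @staticmethod
    def _run_command_checked(command):
        '''Run one external docking command and fail loudly on a non-zero exit.'''
        import shlex
        import subprocess
        result = subprocess.run(shlex.split(command),
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        if result.returncode != 0:
            raise RuntimeError('command failed ({0}): {1}'.format(result.returncode,
                                                                  result.stdout.decode()))
        return result.stdout.decode()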
    ##############################################################################
    # g_mmpbsa part
    ##############################################################################
    def prepare_g_mmpbsa(self, prep_g_mmpbsa=True):
        '''
        Prepare the g_mmpbsa run folder and the initial json configuration
        (the flag parameter is reconstructed from the guard below).
        :return:
        '''
        if prep_g_mmpbsa is False:
            print('prep gmmpbsa ', prep_g_mmpbsa)
            return 'Do not prepare run files'
        if self.g_mmpbsa_prepared is True:
            print('Do not prep files, g_mmpbsa is already prepared')
            return 'Do not prepare run files'

        self.run_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type
        self.sim_folder_run = self.folder_path + os.sep + self.run_folder_name
        self.g_mmpbsa_folder = self.get_uber_g_mmpbsa_run_folder_name()

        # Create folder, don't forget
        self.directories = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name)
        print('TADA ', self.directories)

        self.json_state_file = (self.sim_folder_run + os.sep + self.receptor_name
                                + '_' + self.molecule_name + '.json')
        self.absolute_json_state_file = (self.g_mmpbsa_folder + os.sep + self.receptor_name
                                         + '_' + self.molecule_name + '_' + self.run_type + '.json')

        if len(self.directories) == 0:
            print('Creating g_mmpbsa run folder\n')
            folder_utils.create_folder(self.sim_folder_run)
            self.folder_exists = True

            programs_dict = {'energySoftware': {'g_mmpbsa': {}}}
            self.state_data.update({'trajectoryFile': self.trajectory_file,
                                    'mdpFile': self.mdp_file,
                                    'tprFile': self.tpr_file,
                                    'indexFile': self.index_file,
                                    'runFolderName': self.run_folder_name,
                                    'folderPath': self.folder_path,
                                    'jsonStates': self.json_state_file,
                                    'runType': self.run_type,
                                    'molName': self.molecule_name,
                                    'receptorName': self.receptor_name,
                                    'simRunFolder': self.sim_folder_run,
                                    'RunFolder': self.g_mmpbsa_folder,
                                    'absoluteJsonStates': self.absolute_json_state_file,
                                    'directory': self.directories,
                                    'folderCreated': self.folder_exists,
                                    'simStates': {}})
            self.state_data.update(programs_dict)
            # self.prepVinaSim_exhaust()
            self.save_state_data_json()
            self.load_state_called = False
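    # For reference, the state file written above has this overall shape
    # (abridged; the per-part entries are filled in later by
    # prepare_for_dask_cluster):
    #
    # {
    #     "trajectoryFile": "...", "mdpFile": "...", "tprFile": "...",
    #     "indexFile": "...", "runType": "g_mmpbsa", "simStates": {},
    #     "energySoftware": {
    #         "g_mmpbsa": {
    #             "frames": [...], "parts": [...],
    #             "fileList": [...], "absFileList": [...],
    #             "prepare": true,
    #             "simStates": {"0": {"runFinished": false, "fileSave": "...", ...}}
    #         }
    #     }
    # }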
    def prepare_for_dask_cluster(self, LeDock=2, rDock=2, FlexAid=2, Vina=2, parallel=False):
        '''
        Prepare the g_mmpbsa jobs for a dask cluster run: slice the trajectory
        into parts, save each part to disk and pre-load the shared input files.
        :param parallel:
        :return:
        '''
        # from molmolpy.moldock import run_dask_tools
        from molmolpy.tools import run_dask_tools

        # Testing phase
        total_free_cores = 16
        # Production
        # worker_status = run_dask_tools.get_dask_worker_status(curr_client)
        # get_worker_free = run_dask_tools.check_free_resources(worker_status)

        traj_len = len(self.trajectory_mdtraj)

        # Free core approach: divide the trajectory between the free cores
        # TODO article Pagadala, Software for molecular docking
        div_traj = math.ceil(traj_len / total_free_cores)
        # select_indexes = list(range(total_free_cores))  # maximum parallel
        # div_traj = math.trunc(traj_len / total_free_cores)
        select_frames = list(range(0, traj_len, div_traj))
        select_indexes = list(range(len(select_frames)))

        temp_mdtraj = []
        temp_mdtraj_indexes = []
        file_save_list = []
        abs_file_save_list = []
        simStates = {'simStates': {}}

        folder_to_save = self.g_mmpbsa_folder

        for i, traj in zip(select_indexes, select_frames):
            temp_state = {str(i): {}}

            temp_traj = self.trajectory_mdtraj[traj:traj + div_traj]
            temp_mdtraj.append(temp_traj)
            temp_mdtraj_indexes.append(i)

            file_save = 'traj_part_{0}.xtc'.format(i)  # name pattern assumed
            abs_file_save = folder_to_save + os.sep + file_save
            file_save_list.append(file_save)
            abs_file_save_list.append(abs_file_save)

            temp_state[str(i)].update({'runFinished': False,
                                       'index': i,
                                       'absFolder': folder_to_save,
                                       'fileSave': file_save,
                                       'absFileSave': abs_file_save,
                                       'firstIndex': self.first_index,
                                       'secondIndex': self.second_index,
                                       'indexFile': self.index_file,
                                       'mdpFile': self.mdp_file,
                                       'tprFile': self.tpr_file})

            energy_mm = 'energy_MM_{0}.xvg'.format(i)
            polar = 'polar_{0}.xvg'.format(i)
            apolar = 'apolar_{0}.xvg'.format(i)
            contrib_mm = 'contrib_MM_{0}.dat'.format(i)
            contrib_pol = 'contrib_pol_{0}.dat'.format(i)
            contrib_apol = 'contrib_apol_{0}.dat'.format(i)
            temp_state[str(i)].update({'energyMM': energy_mm,
                                       'polar': polar,
                                       'apolar': apolar,
                                       'contrib_MM': contrib_mm,
                                       'contrib_pol': contrib_pol,
                                       'contrib_apol': contrib_apol})

            temp_traj.save(abs_file_save)
            temp_state[str(i)].update({'fileSaved': True})
            simStates['simStates'].update(temp_state)

        self.mdtraj_frames = select_frames
        self.mdtraj_sliced = temp_mdtraj
        self.mdtraj_parts = temp_mdtraj_indexes
        self.file_save_list = file_save_list
        self.abs_file_save_list = abs_file_save_list
        self.simStates = simStates
        self.g_mmpbsa_prepared = True

        self.state_data['energySoftware']['g_mmpbsa'].update({'frames': self.mdtraj_frames,
                                                              'prepare': self.g_mmpbsa_prepared,
                                                              'parts': self.mdtraj_parts,
                                                              'fileList': self.file_save_list,
                                                              'absFileList': self.abs_file_save_list,
                                                              'firstIndex': self.first_index,
                                                              'secondIndex': self.second_index,
                                                              'indexFile': self.index_file,
                                                              'mdpFile': self.mdp_file,
                                                              'tprFile': self.tpr_file})
        self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates)
        self.save_state_data_json()

        # Pre-read the shared input files once so they can be shipped to the
        # workers together with each job.
        tpr_abs = self.state_data['energySoftware']['g_mmpbsa']['tprFile']
        tpr_file = open(tpr_abs, 'rb')
        tpr_mem = tpr_file.read()
        tpr_filename = tpr_abs.split(os.sep)[-1]

        mdp_abs = self.state_data['energySoftware']['g_mmpbsa']['mdpFile']
        mdp_file = open(mdp_abs, 'r')
        mdp_mem = mdp_file.read()
        mdp_filename = mdp_abs.split(os.sep)[-1]

        index_abs = self.state_data['energySoftware']['g_mmpbsa']['indexFile']
        index_file = open(index_abs, 'r')
        index_mem = index_file.read()
        index_filename = index_abs.split(os.sep)[-1]

        data_pre = {}
        data_pre.update({'tprName': tpr_filename, 'tprMem': tpr_mem})
        data_pre.update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
        data_pre.update({'indexName': index_filename, 'indexMem': index_mem})
        self.dask_prep = data_pre

        run_g_mmpbsa = []
        for part_num in self.mdtraj_parts:
            data = self.state_data['energySoftware']['g_mmpbsa']['simStates'][str(part_num)]
            save_run_name = "g_mmpbsa_part_{0}".format(part_num)
            data.update({'Program': 'g_mmpbsa'})
            data.update({'part_num': part_num})
            data.update({'save_run_name': save_run_name})
            data.update({'dask': {}})

            traj_abs = data['absFileSave']
            traj_file = open(traj_abs, 'rb')
            traj_mem = traj_file.read()
            traj_filename = data['fileSave']

            data['dask'].update({'trajName': traj_filename, 'trajMem': traj_mem})  # key names assumed
            data['dask'].update({'tprName': tpr_filename, 'tprMem': tpr_mem})
            data['dask'].update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
            data['dask'].update({'indexName': index_filename, 'indexMem': index_mem})

            run_g_mmpbsa.append(data)
            # result = run_dock_tools.run_LeDock_sim_parallel(LeDock_sample_num, data)

        # Only keep jobs that have not finished yet
        run_mmpbsa_queue = run_g_mmpbsa
        # run_docking_queue = run_docking_LeDock + run_docking_FlexAid + run_docking_Vina
        final_queue_job = []
        for pre_job in run_mmpbsa_queue:
            # print(pre_job)
            if pre_job['runFinished'] is False:
                final_queue_job.append(pre_job)

        self.run_mmpbsa_dask = final_queue_job
        # random.shuffle(self.run_docking_queue)

        import copy
        self.before_dask = copy.deepcopy(self.state_data)  # snapshot written back from check_dask_jobs

        print('Finished preparing g_mmpbsa jobs')
        # TODO should I add json saving of information or not?
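    # The slicing above needs care at the tail: with math.ceil the last chunk
    # is simply shorter. A standalone sketch of the frame partitioning used
    # by prepare_for_dask_cluster:

    @staticmethod
    def _partition_frames(traj_len, total_free_cores):
        '''Return (start, stop) frame ranges, one chunk per free core at most.'''
        chunk = math.ceil(traj_len / total_free_cores)
        return [(start, min(start + chunk, traj_len))
                for start in range(0, traj_len, chunk)]

    # e.g. _partition_frames(10, 4) -> [(0, 3), (3, 6), (6, 9), (9, 10)]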
TODO need to think", "# select_indexes = list(range(total_free_cores)) # Maximum parallel #div_traj = math.trunc(traj_len/total_free_cores) select_frames = list(range(0,traj_len,div_traj))", "self.LeDock_command_run_list.append(command_to_run) print(\"Launching new Sim\") temp_dict = {str(sample_num): {'save_run_name': self.save_run_name, 'commandRun': command_to_run, 'commandToClean':command_to_clean, 'dokFileName':ligand_clear_dok,", "data.update({'save_run_name': save_run_name}) data.update({'dask': {}}) traj_abs = data['absFileSave'] traj_file = open(traj_abs, 'rb') traj_mem =", "= 1 # This will hold information about run states # self.uber_dock_folder =", "# Only considered when ADAPTVGA is 0 MUTARATE 0.10 # Crossover operator #", "# sys.exit() print(\"Vina sample run command prep finished\") else: print('Please setup vina box", "LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT", "for preparation >>> self.run_uber_dock_protocol() or seperately >>> EPI_uber_dock.calculate_max_radius_from_com() >>> EPI_uber_dock.calculate_cube_edges() >>> EPI_uber_dock.calculate_box_edges_from_com() >>>", ">>> >>> LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True) >>> # >>> # LasR_MOR_mmpbsa_calc.run_dask_docking(client) Notes ----- See examples/cluster/plot_dbscan.py for", "[0.0,1.0] # Only considered when ADAPTVGA is 0 MUTARATE 0.10 # Crossover operator", "{5} \" \\ \"--size_y {6} \" \\ \"--size_z {7} \" \\ \"--exhaustiveness {8}", "for easier switch worker_ids = {} for i, id in enumerate(get_worker_free): worker_ids.update({i: id})", "OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY", "self.simStates = self.state_data['energySoftware']['g_mmpbsa']['simStates'] test = 1 self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare'] # self.state_data['energySoftware']['g_mmpbsa'].update({'frames': self.mdtraj_frames}) #", "self.ledock_ymin = self.ledock_data['LeDock_params']['ymin'] # self.ledock_ymax = self.ledock_data['LeDock_params']['ymax'] # self.ledock_zmin = self.ledock_data['LeDock_params']['zmin'] # self.ledock_zmax", "INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS", "self.output_ligand_rdock.write(self.ligand_pybel ) # self.output_ligand_rdock.close() self.ledock_folder_name = self.receptor_name + '_' + self.molecule_name + '_'", "test = 1 # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type) self.ledock_directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.ledock_folder_name) print('TADA", "commands') # self.prep_LeDock_run = True # except: # print('LeDock_params simStates is empty verify", "Value of 0 or 1 ADAPTVGA 1 # Adaptive crossover and mutation probabilities", "filename = self.json_state_file filename = self.absolute_json_state_file filedata = self.state_data # elif filedata is", "self.rdock_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + 'rDock' # self.rdock_absolute_folder_name", "1)) self.run_folder_name_samples = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples self.sim_folder_run_samples", "'_' + self.run_type_samples + '.json' # This will hold information about run states", "'commandRun': command_to_run, 'runFinished': False}}) # try: # os.system(command_to_run) # except KeyboardInterrupt: # #", "= self.ledock_data['LeDock_params']['receptorFile'] # self.ledock_rmsd = self.ledock_data['LeDock_params']['LeDockRMSD'] # # self.ledock_xmin = 
class GMMPBSAObject(object):
    """
    Able to load molecule objects from pybel. Stores molecule information in a
    pandas dataframe as well as a numpy list.
    Read more in the :ref:`User Guide <MoleculeObject>`.

    Usage example

    >>> EPI_folder = '/media/Work/MEGA/Programming/StressHormones/dock_EPI'
    >>> EPI_samples = '/media/Work/MEGA/Programming/StressHormones/'
    >>>
    >>> receptor_file = EPI_folder + os.sep + 'centroid_model_clust2.pdbqt'
    >>> ligand_file = EPI_folder + os.sep + 'EPI.pdbqt'
    >>> molname = 'EPI'
    >>> receptor_name = 'LasR'
    >>> run_type = 'vina_sample'
    >>>
    >>> EPI_uber_dock = uber_docker.UberDockerObject(receptor_file, ligand_file, '.', molname=molname, receptor_name=receptor_name)
    >>>
    >>> EPI_uber_dock.prepare_uber_dock_protocol()
    >>> EPI_uber_dock.run_uber_dock_protocol()

    Use together
    >>> self.prepare_uber_dock_protocol() for preparation
    >>> self.run_uber_dock_protocol()

    or separately
    >>> EPI_uber_dock.calculate_max_radius_from_com()
    >>> EPI_uber_dock.calculate_cube_edges()
    >>> EPI_uber_dock.calculate_box_edges_from_com()
    >>>
    >>> # This is for rDock; comment this part out for a while
    >>> EPI_uber_dock.prepare_rdock_settings()
    >>> EPI_uber_dock.generate_rdock_cavity()
    >>> EPI_uber_dock.prep_rDock_dock_run_commands()
    >>> EPI_uber_dock.run_rDock_simulation(parallel=True, waitTime=15)
    >>>
    >>> # This is for FlexAid
    >>> EPI_uber_dock.prepare_flexaid_settings()
    >>> EPI_uber_dock.process_flexaid_ligand()
    >>> EPI_uber_dock.get_flexaid_clefts()
    >>> EPI_uber_dock.flexaid_generate_ga_dat_parameters()
    >>> EPI_uber_dock.flexaid_generate_config_input()
    >>> EPI_uber_dock.prep_FlexAid_dock_run_commands()
    >>> EPI_uber_dock.run_FlexAid_simulation(parallel=True, waitTime=15)
    >>>
    >>> # This is for Autodock vina
    >>>
    >>> LasR_MOR_mmpbsa_calc = g_mmpbsa_dask.GMMPBSAObject(traj, topol_file, tpr_file, mdp_file, index_file, first_index, second_index, molname, receptor_name)
    >>>
    >>> LasR_MOR_mmpbsa_calc.prepare_g_mmpbsa_dask_protocol(client)
    >>> LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True)
    >>> # LasR_MOR_mmpbsa_calc.run_dask_docking(client)

    Parameters
    ----------
    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.

    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.

    This implementation bulk-computes all neighborhood queries, which
    increases the memory complexity to O(n.d) where d is the average number
    of neighbors, while original DBSCAN had memory complexity O(n). Sparse
    neighborhoods can be precomputed using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
    with ``mode='distance'``.

    References
    ----------
    """

    def __init__(self, traj, topol_file, tpr_file, mdp_file, index_file,
                 first_index, second_index,
                 molname='Unknown', receptor_name='Unknown',
                 folder_path='.', run_type='g_mmpbsa',
                 load_state_file=None):

        self.load_state_file = load_state_file

        if load_state_file is not None:
            self.load_state_data_json(self.load_state_file)
        else:
            self.trajectory_file = traj
            self.topology_file = topol_file
            self.tpr_file = tpr_file
            self.mdp_file = mdp_file
            self.index_file = index_file
            self.first_index = first_index
            self.second_index = second_index

            # Running g_mmpbsa, whether it is for exhaustiveness or a
            # traditional run
            self.folder_path = folder_path
            self.command_run_list = []
            self.command_samples_run_list = []
            self.LeDock_command_run_list = []

            self.molecule_name = molname
            self.ligand_name = molname
            self.receptor_name = receptor_name
            self.run_type = run_type

            self.state_data = {}
            self.state_data_samples = {}

            self.simStates = {'simStates': {}}
            self.objects_loaded = False
            self.g_mmpbsa_prepared = False
            self.setup_box = False
            self.folder_exists = False

            self.load_state_called = False
            self.load_state_called_samples = False
            self.prep_sample_run = True
    def get_uber_g_mmpbsa_run_folder_name(self):
        curr_folder = os.getcwd()
        return curr_folder + os.sep + self.run_folder_name

    def get_samples_run_folder_name(self):
        curr_folder = os.getcwd()
        print("Yippie yi kay", curr_folder)
        return curr_folder + os.sep + self.run_folder_name_samples

    def save_state_data_json(self, filedata=None, filename=None):
        '''
        Save the current run state as a json configuration.

        :param filedata: state dict to save; defaults to self.state_data
        :param filename: target json file; defaults to self.absolute_json_state_file
        :return:
        '''
        # filename = self.sim_folder_run + os.sep + self.receptor_name + '_' + self.molecule_name + '.json'
        if filename is None and filedata is None:
            filename = self.absolute_json_state_file
            filedata = self.state_data
        # the original also sketched pickle.dump(...) variants here
        with open(filename, 'w') as outfile:
            json.dump(filedata, outfile, indent=4)

    def load_state_data_json(self, filename):
        '''
        :param filename: load json state data
        :return:
        '''
        self.load_state_called = True
        self.state_data = json.load(open(filename, "r"))
        # os.chdir('HSL_exhaustiveness')

        self.trajectory_file = self.state_data['trajectoryFile']
        self.mdp_file = self.state_data['mdpFile']
        self.tpr_file = self.state_data['tprFile']
        self.index_file = self.state_data['indexFile']

        self.folder_path = self.state_data['folderPath']
        self.run_type = self.state_data['runType']
        self.molecule_name = self.state_data['molName']
        self.receptor_name = self.state_data['receptorName']

        self.simStates = self.state_data['energySoftware']['g_mmpbsa']['simStates']
        self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare']

    # TODO should I add json saving of information or not?
    def load_samples_state_data_json(self, filename):
        '''
        :param filename: load json state data for the sample runs
        :return:
        '''
        self.load_state_called_samples = True
        self.state_data_samples = json.load(open(filename, "r"))

        self.ligand_file = self.state_data_samples['ligandFile']
        self.exhaustiveness = self.state_data_samples['exhaustivenessList']
        self.samples_run = self.state_data_samples['samplesList']
        self.folder_path = self.state_data_samples['folderPath']
        self.run_type = self.state_data_samples['runType']
        self.molecule_name = self.state_data_samples['molName']
        self.receptor_name = self.state_data_samples['receptorName']

        self.samples_exhaust = self.state_data_samples['samples_exhaust']
        self.sim_folder_run_samples = self.state_data_samples['simRunFolder']  # .split('/')[-1]
        self.directories_samples = self.state_data_samples['directory']
        self.setup_box = self.state_data_samples['setup']
        self.folder_exists = self.state_data_samples['folderCreated']

        self.x_center = self.state_data_samples['boxSettings']['center_x']
        # the remaining box settings (center_y, center_z, the sizes and the
        # exhaustiveness) load from 'boxSettings' the same way
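    # --- Illustrative sketch (not part of the original molmolpy API) ---
    # A minimal, hedged example of the json state round-trip the two methods
    # above rely on; the state dict and file name here are hypothetical.
    @staticmethod
    def _sketch_state_roundtrip(tmp_path='state_demo.json'):
        import json
        state = {'molName': 'EPI', 'simStates': {'1': {'runFinished': False}}}
        with open(tmp_path, 'w') as handle:
            json.dump(state, handle, indent=4)
        with open(tmp_path) as handle:
            loaded = json.load(handle)
        # json object keys are always strings, which is why sample numbers
        # are indexed as str(sample_num) throughout this class
        assert loaded['simStates']['1']['runFinished'] is False
        return loaded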
    ####################################################################################################################
    # TODO this part needs to be thought out
    def prepare_samples_collection_run(self, standard_exhaust=128,
                                       num_samples_run=100,
                                       run_type='samples_run'):
        if self.setup_box is False:
            print('Please setup simulation box')
            sys.exit(0)

        self.run_type_samples = run_type
        self.prep_samples_run = True
        self.samples_exhaust = standard_exhaust
        self.samples_run = list(range(1, num_samples_run + 1))

        self.run_folder_name_samples = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples
        self.sim_folder_run_samples = self.folder_path + os.sep + self.run_folder_name_samples

        # Create folder; the exhaustiveness applies to all samples
        self.directories_samples = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name_samples)
        print('TADA ', self.directories_samples)

        self.json_samples_state_file = self.sim_folder_run_samples + os.sep + \
            self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples + '.json'

        # This will hold information about run states
        if len(self.directories_samples) == 0:
            print('Creating folder for vina samples run\n')
            print(self.sim_folder_run_samples)
            folder_utils.create_folder(self.sim_folder_run_samples)
            self.folder_exists_samples = True

            self.state_data_samples.update({'samples_exhaust': self.samples_exhaust,
                                            'samplesList': self.samples_run,
                                            'folderPath': self.folder_path,
                                            'runType': self.run_type_samples,
                                            'molName': self.molecule_name,
                                            'receptorName': self.receptor_name,
                                            'simRunFolder': self.sim_folder_run_samples,
                                            'directory': self.directories_samples,
                                            'setup': self.setup_box,
                                            'folderCreated': self.folder_exists_samples,
                                            'simStates': {}})

            self.prepVinaSim_samples()
            self.save_state_data_json(filedata=self.state_data_samples, filename=self.json_samples_state_file)

            self.load_state_called_samples = False
            self.prep_sample_run = True
        else:
            self.load_state_file_samples = self.json_samples_state_file
            self.load_state_called_samples = True
            self.load_samples_state_data_json(self.load_state_file_samples)
            self.prep_sample_run = True

    @hlp.timeit
    def prepare_mdtraj_object(self):
        '''
        Prepare the mdtraj trajectory object, get the mdtraj topology and
        save it as a pandas dataframe. Calculate the pdb receptor center of
        mass.
        :return:
        '''
        # the original loading call is truncated in the source; md.load is
        # the standard mdtraj entry point for a trajectory plus topology
        self.trajectory_mdtraj = md.load(self.trajectory_file, top=self.topology_file)
        self.objects_loaded = True

    def get_molecule_name(self):
        return self.molecule_name

    def get_receptor_name(self):
        return self.receptor_name

    def set_molecule_name(self, mol_name):
        self.molecule_name = mol_name

    def set_receptor_name(self, receptor_name):
        self.receptor_name = receptor_name

    def set_flexaid_path(self, path):
        print('FlexAid path is set to ', path)
        self.flexaid_path = path

    def set_ledock_path(self, path):
        print('LeDock path is set to ', path)
        self.ledock_path = path

    # This might need to get modified
    def find_sample_files(self, folder):
        try:
            VIP = []
            for dirname, dirnames, filenames in os.walk(folder):
                for i in filenames:
                    if 'out' in i:
                        VIP.append(i)
                    # This is not necessary since the info is inside the
                    # pdbqt file
            return VIP
        except Exception as e:
            print("error in find_files: ", e)
            sys.exit(0)

    def find_sample_folders(self, folder_path='.', dir_name='vina_sample'):
        try:
            dir_names = []
            for dirname, dirnames, filenames in os.walk(folder_path):
                # print(dirname, '-')
                if dir_name in dirname:
                    dir_names.append(dirname)
            return sorted(dir_names)
        except Exception as e:
            print("Problem with finding folders : ", e)
            sys.exit(0)

    def hold_nSec(self, n):
        for i in range(1, n + 1):
            print(i)
            time.sleep(1)  # Delay for 1 sec
        print('Ok %s secs have passed' % (n))
    ####################################################################################################################
    # g_mmpbsa part

    @hlp.timeit
    def prepare_g_mmpbsa_dask_protocol(self, dask_client=None, prep_g_mmpbsa=True):
        '''
        Prepare the g_mmpbsa run folder and split the trajectory into parts,
        one part per free core on the dask cluster.
        :return:
        '''
        self.run_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type
        self.sim_folder_run = self.folder_path + os.sep + self.run_folder_name

        # Create folder, don't forget
        self.directories = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name)
        print('TADA ', self.directories)

        self.json_state_file = self.sim_folder_run + os.sep + \
            self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json'

        # This will hold information about run states
        self.g_mmpbsa_folder = self.get_uber_g_mmpbsa_run_folder_name()
        self.absolute_json_state_file = self.g_mmpbsa_folder + os.sep + \
            self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json'

        if len(self.directories) == 0:
            print('Creating folder for g_mmpbsa run\n')
            print(self.sim_folder_run)
            folder_utils.create_folder(self.sim_folder_run)
            self.folder_exists = True

            programs_dict = {'energySoftware': {'g_mmpbsa': {}}}
            self.state_data.update(programs_dict)
            self.state_data.update({'trajectoryFile': self.trajectory_file,
                                    'mdpFile': self.mdp_file,
                                    'tprFile': self.tpr_file})

        try:
            # a previous run may already hold the prepared state
            self.simStates = self.state_data['energySoftware']['g_mmpbsa']['simStates']
            self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare']
        except Exception:
            print('G_mmpbsa state is empty')

        # Check free resources on the cluster to decide how many parts to make
        if dask_client is not None:
            worker_status = run_dask_tools.get_dask_worker_status(dask_client)
            get_worker_free = run_dask_tools.check_free_resources(worker_status)
            total_free_cores = 0
            for worker in get_worker_free:
                preped = get_worker_free[worker]['preped']
                total_free_cores += preped['freeCores']
        else:
            total_free_cores = 16  # testing-phase fallback

        if prep_g_mmpbsa is False:
            print('prep gmmpbsa ', prep_g_mmpbsa)
            return 'Do not prepare run files'

        if self.g_mmpbsa_prepared is True:
            print('Do not prep files')
            return

        # Maximum parallelism: one chunk of frames per free core
        traj_len = len(self.trajectory_mdtraj)
        div_traj = max(1, math.trunc(traj_len / total_free_cores))
        select_frames = list(range(0, traj_len, div_traj))
        select_indexes = list(range(len(select_frames)))

        folder_to_save = self.g_mmpbsa_folder

        temp_mdtraj = []
        temp_mdtraj_indexes = []
        file_save_list = []
        abs_file_save_list = []
        temp_state = {}

        for i, traj in zip(select_indexes, select_frames):
            temp_traj = self.trajectory_mdtraj[traj:traj + div_traj]
            temp_mdtraj.append(temp_traj)
            temp_mdtraj_indexes.append(i)

            file_save = 'traj_part{0}.xtc'.format(i)
            abs_file_save = folder_to_save + os.sep + file_save
            file_save_list.append(file_save)
            abs_file_save_list.append(abs_file_save)

            temp_state.update({str(i): {'fileName': file_save,
                                        'absFileSave': abs_file_save,
                                        'mdpFile': self.mdp_file,
                                        'tprFile': self.tpr_file}})

            energy_mm = 'energy_MM_{0}.xvg'.format(i)
            polar = 'polar_{0}.xvg'.format(i)
            apolar = 'apolar_{0}.xvg'.format(i)
            contrib_mm = 'contrib_MM_{0}.dat'.format(i)
            contrib_pol = 'contrib_pol_{0}.dat'.format(i)
            contrib_apol = 'contrib_apol_{0}.dat'.format(i)

            temp_state[str(i)].update({'energyMM': energy_mm,
                                       'polar': polar,
                                       'apolar': apolar,
                                       'contrib_MM': contrib_mm,
                                       'contrib_pol': contrib_pol,
                                       'contrib_apol': contrib_apol})

            temp_traj.save(abs_file_save)
            temp_state[str(i)].update({'fileSaved': True})

        self.simStates['simStates'].update(temp_state)
        self.mdtraj_frames = select_frames
        self.mdtraj_sliced = temp_mdtraj
        self.mdtraj_parts = temp_mdtraj_indexes
        self.file_save_list = file_save_list
        self.abs_file_save_list = abs_file_save_list

        self.g_mmpbsa_prepared = True

        self.state_data['energySoftware']['g_mmpbsa'].update({'frames': self.mdtraj_frames})
        self.state_data['energySoftware']['g_mmpbsa'].update({'prepare': self.g_mmpbsa_prepared})
        self.state_data['energySoftware']['g_mmpbsa'].update({'parts': self.mdtraj_parts})
        self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list})
        self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list})
        self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates)
        self.state_data['energySoftware']['g_mmpbsa'].update({'firstIndex': self.first_index})
        self.state_data['energySoftware']['g_mmpbsa'].update({'secondIndex': self.second_index})
        self.state_data['energySoftware']['g_mmpbsa'].update({'indexFile': self.index_file})
        self.state_data['energySoftware']['g_mmpbsa'].update({'mdpFile': self.mdp_file})
        self.state_data['energySoftware']['g_mmpbsa'].update({'tprFile': self.tpr_file})
        self.save_state_data_json()
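    # --- Illustrative sketch (not part of the original molmolpy API) ---
    # How the frame-chunking above behaves, shown with plain index ranges;
    # a real call slices an mdtraj.Trajectory with the same arithmetic. The
    # default numbers are hypothetical.
    @staticmethod
    def _sketch_split_frames(traj_len=100, total_free_cores=16):
        import math
        # frames per part; the guard mirrors the max(1, ...) fix above
        div_traj = max(1, math.trunc(traj_len / total_free_cores))
        select_frames = list(range(0, traj_len, div_traj))
        # each part i would be trajectory[start:start + div_traj]
        parts = [(start, min(start + div_traj, traj_len)) for start in select_frames]
        return parts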
    @hlp.timeit
    def prepare_for_dask_cluster(self, LeDock=2, rDock=2, FlexAid=2, Vina=2, parallel=False):
        '''
        Build the g_mmpbsa dask job queue: read the shared input files into
        memory and attach them to every per-part job. The LeDock/rDock/
        FlexAid/Vina arguments are leftovers from the uber-dock protocol this
        module was adapted from.
        :return:
        '''
        # Prepare outputs: snapshot the state before the in-memory blobs are
        # attached, so the json stays serializable
        self.before_dask = copy.deepcopy(self.state_data)
        self.save_state_data_json(filedata=self.before_dask, filename=self.absolute_json_state_file)
        # allow CPU to cool down

        if self.g_mmpbsa_prepared is True:
            full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa']

            tpr_abs = full_g_mmpbsa_data['tprFile']
            tpr_file = open(tpr_abs, 'rb')
            tpr_mem = tpr_file.read()
            tpr_filename = tpr_abs.split(os.sep)[-1]

            mdp_abs = full_g_mmpbsa_data['mdpFile']
            mdp_file = open(mdp_abs, 'r')
            mdp_mem = mdp_file.read()
            mdp_filename = mdp_abs.split(os.sep)[-1]

            index_abs = full_g_mmpbsa_data['indexFile']
            index_file = open(index_abs, 'r')
            index_mem = index_file.read()
            index_filename = index_abs.split(os.sep)[-1]

            data_pre = {}
            data_pre.update({'tprName': tpr_filename, 'tprMem': tpr_mem})
            data_pre.update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
            data_pre.update({'indexName': index_filename, 'indexMem': index_mem})
            self.dask_prep = data_pre

            # TODO for big files, upload the G_MMPBSA inputs to all workers
            # first, e.g.
            # big_future = client.scatter(self.dask_prep, broadcast=True)

            run_g_mmpbsa = []
            for part_num in full_g_mmpbsa_data['parts']:
                data = self.state_data['energySoftware']['g_mmpbsa']['simStates'][str(part_num)]
                # run-name format reconstructed; the original value is
                # truncated in the source
                save_run_name = 'g_mmpbsa_part_{0}'.format(part_num)
                data.update({'save_run_name': save_run_name})
                data.update({'dask': {}})

                traj_abs = data['absFileSave']
                traj_file = open(traj_abs, 'rb')
                traj_mem = traj_file.read()
                traj_filename = traj_abs.split(os.sep)[-1]

                data['dask'].update({'trajMem': traj_mem, 'trajName': traj_filename})
                data['dask'].update({'tprName': tpr_filename, 'tprMem': tpr_mem})
                data['dask'].update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
                data['dask'].update({'indexName': index_filename, 'indexMem': index_mem})

                data.update({'Program': 'g_mmpbsa'})
                data.update({'part_num': part_num})
                data.update({'firstIndex': self.first_index})
                data.update({'secondIndex': self.second_index})
                data.update({'runFinished': False})

                run_g_mmpbsa.append(data)

            run_mmpbsa_queue = run_g_mmpbsa
            # random.shuffle(self.run_docking_queue)

            final_queue_job = []
            for pre_job in run_mmpbsa_queue:
                # print(pre_job)
                if pre_job['runFinished'] is False:
                    final_queue_job.append(pre_job)

            self.run_mmpbsa_dask = final_queue_job
            print('Finished preparing g_mmpbsa jobs')
            # TODO should I add json saving of information or not?
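    # --- Illustrative sketch (not part of the original molmolpy API) ---
    # A minimal, hedged version of the read-inputs-into-memory step above, so
    # the payload can travel to dask workers as plain bytes/str; the function
    # name is hypothetical while the dict keys mirror the original ones.
    @staticmethod
    def _sketch_pack_inputs(tpr_path, mdp_path, index_path):
        import os
        payload = {}
        with open(tpr_path, 'rb') as handle:   # .tpr is a binary file
            payload['tprMem'] = handle.read()
        with open(mdp_path, 'r') as handle:    # .mdp is plain text
            payload['mdpMem'] = handle.read()
        with open(index_path, 'r') as handle:  # .ndx is plain text
            payload['indexMem'] = handle.read()
        payload['tprName'] = os.path.basename(tpr_path)
        payload['mdpName'] = os.path.basename(mdp_path)
        payload['indexName'] = os.path.basename(index_path)
        return payload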
    ####################################################################################################################
    # This is to run on a dask server

    @hlp.timeit
    def run_dask_docking(self, client, max_jobs_to_run=16):
        '''
        Submit the prepared g_mmpbsa queue to the dask cluster, filling the
        workers round-robin and keeping at most max_jobs_to_run in flight.
        :return:
        '''
        curr_client = client
        worker_status = run_dask_tools.get_dask_worker_status(curr_client)
        get_worker_free = run_dask_tools.check_free_resources(worker_status)
        original_get_worker_free = copy.deepcopy(get_worker_free)

        # job_test = queue_jobs[0]
        # result = run_dask_tools.run_gmmpbsa_using_dask(job_test)

        # This part runs the main program
        submitted_jobs = []
        submitted_jobs_dask = []
        queue_jobs = self.run_mmpbsa_dask
        job_quantity = len(queue_jobs)
        finished_jobs = []
        finished_jobs_dict = {}

        # maybe 2 async threads: one checks finished simulations, the other
        # submits jobs
        gmmbpsa_min_mem = 1000
        retries_num = 2
        curr_index = 0
        curr_worker = 0

        # prepare worker ids for easier switching
        worker_ids = {}
        for i, id in enumerate(get_worker_free):
            worker_ids.update({i: id})

        while len(queue_jobs) > 0:
            get_worker_free_temp = run_dask_tools.check_free_resources(
                run_dask_tools.get_dask_worker_status(curr_client))

            print('----------------TEST------------------')
            curr_item = queue_jobs[curr_index]
            curr_worker_id = worker_ids[curr_worker]

            workstation_info_temp = get_worker_free_temp[curr_worker_id]
            workstation_preped_temp = workstation_info_temp['preped']
            workstation_address = workstation_preped_temp['workerAddress']
            # the refreshed folder lookup is buggy, so take the working dir
            # from the original snapshot
            workstation_dir = original_get_worker_free[curr_worker_id]['preped']['workerDir']
            workstation_freemem = workstation_preped_temp['freeMemory']
            workstation_freecpu = workstation_preped_temp['freeCores']

            curr_item_prog = curr_item['Program']

            # keep len(submitted_jobs_dask) below the job cap
            jobs_running = len(submitted_jobs_dask) - len(finished_jobs)
            max_jobus = max_jobs_to_run

            # g_mmpbsa part
            if curr_item_prog == 'g_mmpbsa':
                if workstation_freemem > gmmbpsa_min_mem and jobs_running < max_jobus:
                    print('Submit MMPBSA job to DASK')
                    pop_item = queue_jobs.pop(curr_index)

                    key_name = pop_item['save_run_name']
                    run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)
                    print('Cur run ', run_name)

                    pop_item.update({'workingDir': workstation_dir})
                    submitted_jobs.append(pop_item)

                    # MAYBE CHECK FOLDER HERE
                    # big_future = client.scatter(pop_item, workers=[workstation_address], hash=False)
                    big_future = pop_item
                    task_g_mmpbsa = client.submit(run_dask_tools.run_gmmpbsa_using_dask,
                                                  big_future,
                                                  workers=[workstation_address],
                                                  key=run_name,
                                                  retries=retries_num)
                    submitted_jobs_dask.append(task_g_mmpbsa)
                else:
                    key_name = curr_item['save_run_name']
                    run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)
                    print('Passed running ', run_name)
                    curr_index += 1

            # update indexes; how to save the submitted-jobs state
            print('-------')
            curr_worker += 1
            if curr_index >= len(queue_jobs):
                curr_index = 0
            if curr_worker == len(worker_ids):
                curr_worker = 0
            print('-----------------------------------------------------------------')

            finished_jobs, finished_jobs_dict = self.check_dask_jobs(
                submitted_jobs_dask, finished_jobs, finished_jobs_dict)

        print('Finished submitting jobs')
        while len(finished_jobs) != job_quantity:
            finished_jobs, finished_jobs_dict = self.check_dask_jobs(
                submitted_jobs_dask, finished_jobs, finished_jobs_dict)
            time.sleep(60)
            print('->' * 10)

        print('Everything is finished :))))))')
        print('---' * 10)
        print('\n')
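    # --- Illustrative sketch (not part of the original molmolpy API) ---
    # Minimal round-robin submission against a dask.distributed client,
    # mirroring the scheduling loop above; `_echo` is a hypothetical stand-in
    # for run_dask_tools.run_gmmpbsa_using_dask.
    @staticmethod
    def _sketch_round_robin_submit(client, jobs, worker_addresses, retries=2):
        def _echo(payload):
            # placeholder task; a real task would run g_mmpbsa here
            return payload

        futures = []
        for i, job in enumerate(jobs):
            address = worker_addresses[i % len(worker_addresses)]
            futures.append(client.submit(_echo, job,
                                         workers=[address],
                                         key='key_sketch_{0}'.format(i),
                                         retries=retries))
        return futures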
    @hlp.timeit
    def check_dask_jobs(self, submitted_jobs_dask, finished_jobs, finished_jobs_dict):
        '''
        Check the status of the submitted dask futures, collect results of
        the finished ones and write their output files back to disk.
        '''
        import copy

        modified_submitted_jobs_dask = copy.deepcopy(submitted_jobs_dask)
        for i, job in enumerate(submitted_jobs_dask):
            status = job.status
            if status == 'finished':
                # pop_item = modified_submitted_jobs_dask.pop(i)
                try:
                    if finished_jobs_dict[i] is True:
                        continue
                except KeyError:
                    pass

                finished_jobs.append(job)
                finished_jobs_dict.update({i: True})

                results = job.result()
                try:
                    key = list(results.keys())[0]
                    prog = results[key]['Program']

                    if prog == 'g_mmpbsa':
                        sample_num = results[key]['part_num']
                        results_dask = results[key]['dask']

                        original_data = self.state_data['energySoftware'][prog]
                        abs_folder = self.g_mmpbsa_folder  # original_data['AbsFolder']

                        # every output travels back as a (name, memory) pair;
                        # the energyMM/contrib_pol memory keys are assumed to
                        # follow the same pattern as the ones visible in the
                        # source
                        for base in ('out', 'energyMM', 'polar', 'apolar',
                                     'contribMM', 'contrib_pol', 'contrib_apol'):
                            out_name = abs_folder + os.sep + results_dask['{0}_filename'.format(base)]
                            out_mem = results_dask['{0}_mem'.format(base)]
                            out_file = open(out_name, 'w')
                            out_file.write(out_mem)
                            out_file.close()

                        update_results = copy.deepcopy(results)
                        update_results[key].pop('dask', None)
                        self.state_data['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]
                except Exception as error:
                    print('error is ', error)
                    # print('i is ', i)

        print('Finished checking dask submissions ---\n')
        print('---' * 10)
        return finished_jobs, finished_jobs_dict
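    # --- Illustrative sketch (not part of the original molmolpy API) ---
    # Polling future states the way check_dask_jobs() does, without blocking
    # on any single job; assumes every job eventually reaches 'finished'
    # (an 'error' status would loop forever in this simplified form).
    @staticmethod
    def _sketch_poll_futures(futures, pause_sec=5):
        import time
        done = {}
        while len(done) < len(futures):
            for i, future in enumerate(futures):
                if i not in done and future.status == 'finished':
                    done[i] = future.result()
            if len(done) < len(futures):
                time.sleep(pause_sec)
        return [done[i] for i in range(len(futures))]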
    ####################################################################################################################
    # LeDock part

    def prepare_ledock_settings(self):
        '''
        Prepare the LeDock folder, write the receptor/ligand input files and
        process the receptor with LePro.
        '''
        self.ledock_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + 'LeDock'
        self.ledock_absolute_folder_name = self.uber_dock_folder + os.sep + self.ledock_folder_name

        self.ledock_directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.ledock_folder_name)
        print('TADA ', self.ledock_directories)

        # This will hold information about run states
        if len(self.ledock_directories) == 0:
            print('Creating LeDock folder in the uberDocker folder \n')
            print(self.ledock_directories)
            folder_utils.create_folder(self.ledock_absolute_folder_name)
            self.ledock_folder_exists = True

            self.receptor_ledock_pdb = "{0}.pdb".format(self.receptor_name)
            self.ligand_ledock_mol2 = "{0}.mol2".format(self.ligand_name)
            self.absolute_receptor_ledock_pdb = self.ledock_absolute_folder_name + os.sep + self.receptor_ledock_pdb
            self.absolute_ligand_ledock_mol2 = self.ledock_absolute_folder_name + os.sep + self.ligand_ledock_mol2

            self.receptor_pybel.write("pdb", self.absolute_receptor_ledock_pdb, overwrite=True)
            self.ligand_pybel.write("mol2", self.absolute_ligand_ledock_mol2, overwrite=True)

            # enter the LeDock folder and process the structure for docking
            # using LePro:   ./lepro_linux_x86 LasR_flexaid.pdb
            os.chdir(self.ledock_absolute_folder_name)
            command_receptor = self.ledock_path + os.sep + 'lepro_linux_x86' + ' {0} '.format(self.receptor_ledock_pdb)
            os.system(command_receptor)
            self.lepro_pdb_file = 'pro.pdb'  # LePro writes pro.pdb

            # Need to check that LePro ran fine
            print('Updated receptor with LePro\n')
            os.chdir(self.uber_dock_folder)

            self.state_data['dockSoftware']['LeDock'].update(
                {'receptor_pdb': self.receptor_ledock_pdb,
                 'ligand_mol2': self.ligand_ledock_mol2,
                 'lepro_pdb': self.lepro_pdb_file,
                 'lepro_abs_pdb': self.ledock_absolute_folder_name + os.sep + self.lepro_pdb_file,
                 'abs_receptor_pdb': self.absolute_receptor_ledock_pdb,
                 'abs_ligand_mol2': self.absolute_ligand_ledock_mol2,
                 'LeDockFolderStatus': self.ledock_folder_exists,
                 'LeDockAbsFolder': self.ledock_absolute_folder_name,
                 'LeDockFolderName': self.ledock_folder_name})
            self.save_state_data_json()
            self.load_state_called = False

            self.ledock_rmsd = 0.5
            self.set_up_ledock_dock_blind_parameters(title=self.ledock_title,
                                                     receptor_file=self.lepro_pdb_file,
                                                     ledock_rmsd=self.ledock_rmsd,
                                                     x_center=self.x_center,
                                                     y_center=self.y_center,
                                                     z_center=self.z_center)
        else:
            print('state has been loaded; LeDock folder already exists')

    @hlp.timeit
    def prep_LeDock_dock_run_commands(self, num_samples=10):
        '''
        Prepare the LeDock run commands and save the current state.
        :param num_samples: number of LeDock sampling runs
        :return:
        '''
        try:
            self.LeDock_sim_states = self.state_data['dockSoftware']['LeDock']['simStates']
            self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list']
            print('No need to generate LeDock commands')
            self.prep_LeDock_run = True
        except KeyError:
            self.ledock_samples = list(range(1, num_samples + 1))
            self.LeDock_sim_states = {}
            self.state_data['dockSoftware']['LeDock'].update({'LeDockSample_list': self.ledock_samples})
            self.state_data['dockSoftware']['LeDock'].update({'simStates': self.LeDock_sim_states})
            for sample_num in self.ledock_samples:
                self.prep_LeDock_dock_command(sample_num)
            print('Now continue for LeDock :D')
            self.save_state_data_json()
            self.prep_LeDock_run = True

    def prep_LeDock_dock_command(self, sample_num, pose_gen=20):
        '''
        Prepare a single LeDock run command.
        :param sample_num:
        :param pose_gen: default generate 20 poses
        :return:
        '''
        try:
            if self.setup_ledock_pameters is not False:
                command_receptor = self.ledock_path + os.sep + 'ledock_linux_x86'
                sample_data = self.ledock_input_info[str(sample_num)]
                parm_name = sample_data['ledock_parm_name']

                self.save_run_name = "ledock_{0}_sample_{1}".format(self.run_type, sample_num)
                random_seed = np.random.randint(low_seed, high_seed)

                command_to_run = "{0} {1}".format(command_receptor, parm_name)
                ligand_clear_dok = sample_data['ligand_clear_name'] + '.dok'
                # split the poses out of the .dok file, e.g.
                #   ledock_linux_x86 -spli MOR_flexaid.dok
                command_to_clean = "{0} -spli {1}".format(command_receptor, ligand_clear_dok)
                print(command_to_run)
                self.LeDock_command_run_list.append(command_to_run)
                print("Launching new Sim")

                temp_dict = {str(sample_num): {'save_run_name': self.save_run_name,
                                               'commandRun': command_to_run,
                                               'commandToClean': command_to_clean,
                                               'dokFileName': ligand_clear_dok,
                                               'runFinished': False}}
                self.LeDock_sim_states.update(temp_dict)
                self.state_data['dockSoftware']['LeDock']['simStates'].update(temp_dict)
            else:
                print('Please setup LeDock parameters first')
        except Exception as e:
            print("error in runSim: ", e)
            sys.exit(0)

    ####################################################################################################################
    # Autodock Vina part

    @hlp.timeit
    def prepVinaSim_samples(self):
        for sample_num in self.samples_run:
            self.prepVinaSampleCommand(sample_num)

    @hlp.timeit
    def prepVinaSampleCommand(self, sample_num):
        # TODO need to think about seed
        if self.setup_box is not False:
            # the executable prefix, the run-name format and the --out/--log
            # tail of this template are reconstructed; the box and
            # exhaustiveness placeholders are as in the source
            self.save_run_name = 'vina_{0}_{1}'.format(self.run_type_samples, sample_num)
            command_to_run = "vina --receptor {0} " \
                             "--ligand {1} " \
                             "--center_x {2} " \
                             "--center_y {3} " \
                             "--center_z {4} " \
                             "--size_x {5} " \
                             "--size_y {6} " \
                             "--size_z {7} " \
                             "--exhaustiveness {8} " \
                             "--num_modes {9} " \
                             "--out {10}.pdbqt " \
                             "--log {11}.txt".format(self.receptor_file,
                                                     self.ligand_file,
                                                     self.x_center,
                                                     self.y_center,
                                                     self.z_center,
                                                     self.x_size,
                                                     self.y_size,
                                                     self.z_size,
                                                     self.samples_exhaust,
                                                     self.num_modes,
                                                     self.save_run_name,
                                                     self.save_run_name)
            print(command_to_run)
            self.command_samples_run_list.append(command_to_run)
            print("Launching new Sim")

            self.state_data_samples['simStates'].update(
                {str(sample_num): {'save_run_name': self.save_run_name,
                                   'commandRun': command_to_run,
                                   'runFinished': False}})
            print("Vina sample run command prep finished")
        else:
            print('Please setup vina box settings')
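    # --- Illustrative sketch (not part of the original molmolpy API) ---
    # A hedged example of executing one of the generated command strings with
    # subprocess instead of os.system(); the default command is hypothetical
    # and requires Python 3.7+ for capture_output/text.
    @staticmethod
    def _sketch_run_command(command_to_run="echo vina --help"):
        import shlex
        import subprocess
        # shlex.split keeps quoted arguments intact, unlike str.split
        completed = subprocess.run(shlex.split(command_to_run),
                                   capture_output=True, text=True, check=False)
        return completed.returncode, completed.stdout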
    ####################################################################################################################
    # FlexAid part

    @hlp.timeit
    def flexaid_generate_ga_dat_parameters(self):
        '''
        Generate the GA dat parameters for FlexAid docking.
        :return:
        '''
        self.flexaid_ga_dat_param_template = '''# Number of chromosomes / generations (the leading directives of this
# template are truncated in the source)

# Use adaptive genetic-algorithm operators
# Value of 0 or 1
ADAPTVGA 1

# Adaptive crossover and mutation probabilities
# Floats in interval [0.0,1.0]
ADAPTKCO 0.95 0.10

# Constant crossover probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
CROSRATE 0.90

# Constant mutation probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
MUTARATE 0.10

# Crossover operator
# Intragenic crossovers are possible
...

# How the initial population is generated randomly
POPINIMT RANDOM

# Fitness function
# Value in [LINEAR,PSHARE]
FITMODEL PSHARE

# Parameters of the shared fitness function
# Floats in interval [0.0,1000.0]
SHAREALF 4.0
SHAREPEK 5.0
SHARESCL 10.0

# Reproduction model
# Values in [BOOM,STEADY]
REPMODEL BOOM

# Fraction of population to create
# Only considered when REPMODEL is BOOM
...

# Number of new individuals to generate at each generation
# Only considered when REPMODEL is STEADY
# Integer in interval [1,N-1] where N is NUMCHROM
...

# Number of TOP individuals to print in console
# Integer in interval [1,N] where N is NUMCHROM
PRINTCHR 10
'''
        self.generate_ga_dat_pameters = True
        self.generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat'
        self.generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep + self.generate_ga_dat

        self.generate_ga_dat_object_file = open(self.generate_ga_dat_name_abs, 'w')
        self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template)
        self.generate_ga_dat_object_file.close()

        self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}})
        self.state_data['dockSoftware']['FlexAid']['GA_params'].update(
            {'generateGA_param': self.generate_ga_dat_pameters,
             'GA_DataName': self.generate_ga_dat,
             'GA_DATA_Abs': self.generate_ga_dat_name_abs,
             'GA_ParamFull': self.flexaid_ga_dat_param_template})

        self.state_data_samples = self.state_data.copy()
        self.save_state_data_json()

    def flexaid_generate_ga_dat_parameters_dask(self):
        '''
        Generate the GA dat parameters for FlexAid docking (dask variant:
        return the file name instead of writing state).
        :return:
        '''
        generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat'
        generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep + generate_ga_dat
        return [generate_ga_dat, ]
class GMMPBSAObject(object):
    """Run g_mmpbsa binding-energy calculations on a dask cluster.

    The trajectory is split into parts, each part is shipped in memory
    (together with the tpr/mdp/index files) to a dask worker, and the
    per-part g_mmpbsa outputs are written back into the run folder.
    Several helpers below (FlexAid, LeDock, Vina) were carried over from
    the uber_dock docking protocol this module was derived from.

    Parameters
    ----------
    traj : str
        Path to the MD trajectory file to analyze.
    topol : str
        Path to the topology file matching the trajectory.
    tpr_file : str
        GROMACS run input (.tpr) file used by g_mmpbsa.
    mdp_file : str
        Parameter (.mdp) file with the solvation settings.
    index_file : str
        GROMACS index (.ndx) file defining the interaction groups.
    first_index, second_index : int
        Index-group numbers of the receptor and the ligand.
    molname : str, optional
        Ligand name, 'Unknown' by default.
    receptor_name : str, optional
        Receptor name, 'Unknown' by default.
    folder_path : str, optional
        Base folder for the run, current directory by default.
    job_name : str, optional
    load_state_file : str, optional
        Path to a previously saved state JSON; when given, the object is
        restored from it instead of being prepared from scratch.

    Usage example

    >>> LasR_MOR_mmpbsa_calc = g_mmpbsa_dask.GMMPBSAObject(traj, topol_file,
    ...                                                    tpr_file, mdp_file, index_file,
    ...                                                    first_index, second_index,
    ...                                                    molname, receptor_name)
    >>>
    >>> LasR_MOR_mmpbsa_calc.prepare_g_mmpbsa_dask_protocol(client)
    >>> LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True)
    >>> LasR_MOR_mmpbsa_calc.run_dask_docking(client)

    Notes
    -----
    All per-part payloads are held in memory before submission, so memory
    use grows with the number of trajectory parts prepared at once.
    """
    def __init__(self, traj, topol, tpr_file, mdp_file, index_file,
                 first_index, second_index,
                 molname='Unknown', receptor_name='Unknown', folder_path='.',
                 job_name='Unknown', load_state_file=None):
        self.load_state_file = load_state_file

        if load_state_file is not None:
            self.load_state_data_json(self.load_state_file)
        else:
            print('G_MMPBSA object has been created')

            self.trajectory_file = traj
            self.topology_file = topol
            self.tpr_file = tpr_file
            self.mdp_file = mdp_file
            self.index_file = index_file

            self.first_index = first_index
            self.second_index = second_index

            self.prep_g_mmpbsa_run = False
            self.folder_exists = False

            self.folder_path = folder_path
            self.command_run_list = []
            self.command_samples_run_list = []

            self.molecule_name = molname
            self.ligand_name = molname
            self.receptor_name = receptor_name

            # run label, used in folder and JSON state-file names
            self.run_type = 'g_mmpbsa'
            self.job_name = job_name

            self.state_data = {}
            self.state_data_samples = {}

            self.g_mmpbsa_run_finished = False
            self.g_mmpbsa_sim_states = {'simStates': {}}

            self.objects_loaded = False
            self.g_mmpbsa_prepared = False

            # Parse the trajectory/topology right away so that topology
            # information is available to the preparation steps.
            self.prep_mdtraj_object()
    def save_state_data_json(self, filedata=None, filename=None):
        '''
        Save the current run state to a JSON file.

        :param filedata: dict to save; defaults to self.state_data
        :param filename: target path; defaults to self.absolute_json_state_file
        :return:
        '''
        # TODO create folder for run saving state
        if filedata is None:
            filedata = self.state_data
        if filename is None:
            filename = self.absolute_json_state_file
        json.dump(filedata, open(filename, "w"), sort_keys=True, indent=4)

    def load_state_data_json(self, filename):
        '''
        Restore a previous run from its state JSON file.

        :param filename:
        :return:
        '''
        self.load_state_called = True
        self.state_data = json.load(open(filename, "r"))

        self.trajectory_file = self.state_data['trajectoryFile']
        self.mdp_file = self.state_data['mdpFile']
        self.tpr_file = self.state_data['tprFile']
        self.index_file = self.state_data['indexFile']
        self.folder_path = self.state_data['folderPath']
        self.run_type = self.state_data['runType']
        self.molecule_name = self.state_data['molName']
        self.receptor_name = self.state_data['receptorName']
        # TODO test
        self.sim_folder_run = self.state_data['simRunFolder']  # .split('/')[-1]
        self.directories = self.state_data['directory']
        self.folder_exists = self.state_data['folderCreated']
        self.absolute_json_state_file = self.state_data['absoluteJsonStates']
        self.g_mmpbsa_folder = self.state_data['RunFolder']
        self.json_state_file = self.state_data['jsonStates']

    def load_samples_state_data_json(self, filename):
        '''
        :param filename:
        :return:
        '''
        self.load_state_called_samples = True
        print(os.path.abspath(__file__))
        self.state_data_samples = json.load(open(filename, "r"))

        self.receptor_file = self.state_data_samples['receptorFile']
        self.ligand_file = self.state_data_samples['ligandFile']
        self.exhaustiveness = self.state_data_samples['exhaustivenessList']
        self.samples_run = self.state_data_samples['samplesList']
        self.samples_exhaust = self.state_data_samples['samples_exhaust']
        self.sim_folder_run_samples = self.state_data_samples['simRunFolder']
        self.directories_samples = self.state_data_samples['directory']
        self.setup_box = self.state_data_samples['setup']
        self.folder_exists = self.state_data_samples['folderCreated']

        self.x_center = self.state_data_samples['boxSettings']['center_x']
        self.y_center = self.state_data_samples['boxSettings']['center_y']
        self.z_center = self.state_data_samples['boxSettings']['center_z']
        self.x_size = self.state_data_samples['boxSettings']['size_x']
        self.y_size = self.state_data_samples['boxSettings']['size_y']
        self.z_size = self.state_data_samples['boxSettings']['size_z']
        self.num_modes = self.state_data_samples['boxSettings']['numModes']
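    # A minimal, self-contained sketch of the state round-trip used above:
    # the run state is a plain dict dumped with sort_keys/indent so it can
    # be inspected by hand and reloaded to resume a run. The file name and
    # keys here are illustrative only, not the exact layout of this class.
    @staticmethod
    def _example_state_roundtrip(tmp_path='example_state.json'):
        state = {'runType': 'g_mmpbsa',
                 'folderCreated': False,
                 'simStates': {'1': {'runFinished': False}}}
        with open(tmp_path, 'w') as handle:
            json.dump(state, handle, sort_keys=True, indent=4)
        with open(tmp_path, 'r') as handle:
            restored = json.load(handle)
        assert restored == state
        return restored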
    def get_molecule_name(self):
        return self.molecule_name

    def get_receptor_name(self):
        return self.receptor_name

    def set_molecule_name(self, mol_name):
        self.molecule_name = mol_name

    # This might need to get modified
    def set_receptor_name(self, receptor_name):
        self.receptor_name = receptor_name

    def set_mgltools_path(self, path):
        print('MGLTools path is set to ', path)
        self.mgltools_utilities = path

    def set_flexaid_path(self, path):
        print('FlexAid path is set to ', path)
        self.flexaid_path = path

    def set_ledock_path(self, path):
        print('LeDock path is set to ', path)
        self.ledock_path = path

    def prep_mdtraj_object(self):
        '''
        Prepare the mdtraj trajectory object and expose its topology both
        as an mdtraj object and as a pandas dataframe.
        :return:
        '''
        # md.load handles the common trajectory formats given a topology
        self.trajectory_mdtraj = md.load(self.trajectory_file,
                                         top=self.topology_file)
        self.trajectory_mdtraj_topology = self.trajectory_mdtraj.topology
        self.trajectory_mdtraj_topology_dataframe = \
            self.trajectory_mdtraj.topology.to_dataframe()
        self.objects_loaded = True

    def get_uber_g_mmpbsa_run_folder_name(self):
        curr_folder = os.getcwd()
        return curr_folder + os.sep + self.run_folder_name

    def get_samples_run_folder_name(self):
        curr_folder = os.getcwd()
        print("Yippie yi kay", curr_folder)
        return curr_folder + os.sep + self.run_folder_name_samples
    def prepare_g_mmpbsa(self):
        '''
        Prepare g_mmpbsa run folder and initial json configuration
        :return:
        '''
        self.run_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type
        self.sim_folder_run = self.folder_path + os.sep + self.run_folder_name

        # Create folder don't forget
        self.directories = folder_utils.find_folder_in_path(self.folder_path,
                                                            self.run_folder_name)
        print('TADA ', self.directories)

        # This will hold information about run states
        self.json_state_file = self.sim_folder_run + os.sep + \
            self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json'

        self.g_mmpbsa_folder = self.get_uber_g_mmpbsa_run_folder_name()
        self.absolute_json_state_file = self.g_mmpbsa_folder + os.sep + \
            self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json'

        if len(self.directories) == 0:
            print('Creating folder for g_mmpbsa run\n')
            print(self.sim_folder_run)
            folder_utils.create_folder(self.sim_folder_run)
            self.folder_exists = True

            programs_dict = {'energySoftware': {'g_mmpbsa': {}}}

            self.state_data.update({'trajectoryFile': self.trajectory_file,
                                    'mdpFile': self.mdp_file,
                                    'tprFile': self.tpr_file,
                                    'indexFile': self.index_file,
                                    'runFolderName': self.run_folder_name,
                                    'folderPath': self.folder_path,
                                    'runType': self.run_type,
                                    'molName': self.molecule_name,
                                    'receptorName': self.receptor_name,
                                    'simRunFolder': self.sim_folder_run,
                                    'RunFolder': self.g_mmpbsa_folder,
                                    'absoluteJsonStates': self.absolute_json_state_file,
                                    'directory': self.directories,
                                    'folderCreated': self.folder_exists,
                                    'simStates': {}})
            self.state_data.update(programs_dict)

            self.save_state_data_json()
            self.load_state_called = False
        else:
            self.load_state_file = self.json_state_file
            self.load_state_called = True
            self.load_state_data_json(self.load_state_file)
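    # A tiny sketch of the naming convention used by prepare_g_mmpbsa():
    # '<receptor>_<ligand>_<runType>' for the folder and the same stem with
    # a '.json' suffix for the state file. The argument values below are
    # illustrative.
    @staticmethod
    def _example_run_names(receptor_name='LasR', molecule_name='EPI',
                           run_type='g_mmpbsa', folder_path='.'):
        run_folder_name = receptor_name + '_' + molecule_name + '_' + run_type
        sim_folder_run = folder_path + os.sep + run_folder_name
        json_state_file = sim_folder_run + os.sep + run_folder_name + '.json'
        return run_folder_name, sim_folder_run, json_state_file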
    def prepare_g_mmpbsa_dask_protocol(self, dask_client=None, prep_g_mmpbsa=True):
        '''
        Prepare dask tasks for g_mmpbsa
        :return:
        '''
        self.prepare_g_mmpbsa()

        # Try to load a previously prepared trajectory split
        try:
            self.mdtraj_frames = self.state_data['energySoftware']['g_mmpbsa']['frames']
            self.mdtraj_parts = self.state_data['energySoftware']['g_mmpbsa']['parts']
            self.file_save_list = self.state_data['energySoftware']['g_mmpbsa']['fileList']
            self.abs_file_save_list = self.state_data['energySoftware']['g_mmpbsa']['absFileList']
            self.simStates = self.state_data['energySoftware']['g_mmpbsa']['simStates']
            self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare']
        except KeyError:
            print('g_mmpbsa state is empty, preparing from scratch')

        if self.g_mmpbsa_prepared is True or prep_g_mmpbsa is False:
            print('No need to prep files')
            return 'Do not prep files'

        # Divide trajectory between the free cores of the cluster
        # TODO article: Pagadala, "Software for molecular docking: a review"
        curr_client = dask_client
        # Testing-phase value; in production query the scheduler instead:
        # worker_status = run_dask_tools.get_dask_worker_status(curr_client)
        # get_worker_free = run_dask_tools.check_free_resources(worker_status)
        total_free_cores = 16

        traj_len = len(self.trajectory_mdtraj)
        # Free-core approach: one contiguous chunk per core
        div_traj = math.ceil(traj_len / total_free_cores)
        select_indexes = list(range(total_free_cores))
        select_frames = list(range(0, traj_len, div_traj))

        temp_mdtraj = []
        temp_mdtraj_indexes = []
        file_save_list = []
        abs_file_save_list = []
        simStates = {'simStates': {}}

        folder_to_save = self.g_mmpbsa_folder

        for i, traj in zip(select_indexes, select_frames):
            temp_state = {str(i): {}}
            temp_traj = self.trajectory_mdtraj[traj:traj + div_traj]

            # per-part slice file; the exact naming of the original is not
            # preserved here
            file_save = 'traj_part_{0}.xtc'.format(i)
            abs_file_save = folder_to_save + os.sep + file_save
            temp_traj.save(abs_file_save)

            temp_mdtraj.append(temp_traj)
            temp_mdtraj_indexes.append(i)
            file_save_list.append(file_save)
            abs_file_save_list.append(abs_file_save)

            temp_state[str(i)].update({'runFinished': False,
                                       'index': i,
                                       'absFolder': folder_to_save,
                                       'fileSave': file_save,
                                       'absFileSave': abs_file_save,
                                       'firstIndex': self.first_index,
                                       'secondIndex': self.second_index,
                                       'indexFile': self.index_file,
                                       'mdpFile': self.mdp_file,
                                       'tprFile': self.tpr_file})

            # expected per-part g_mmpbsa output names
            energy_mm = 'energy_MM_{0}.xvg'.format(i)
            polar = 'polar_{0}.xvg'.format(i)
            apolar = 'apolar_{0}.xvg'.format(i)
            contrib_mm = 'contrib_MM_{0}.dat'.format(i)
            contrib_pol = 'contrib_pol_{0}.dat'.format(i)
            contrib_apol = 'contrib_apol_{0}.dat'.format(i)
            temp_state[str(i)].update({'energyMM_filename': energy_mm,
                                       'polar_filename': polar,
                                       'apolar_filename': apolar,
                                       'contribMM_filename': contrib_mm,
                                       'contrib_pol_filename': contrib_pol,
                                       'contrib_apol_filename': contrib_apol})

            simStates['simStates'].update(temp_state)

        self.mdtraj_frames = select_frames
        self.mdtraj_sliced = temp_mdtraj
        self.mdtraj_parts = temp_mdtraj_indexes
        self.file_save_list = file_save_list
        self.abs_file_save_list = abs_file_save_list
        self.simStates = simStates

        self.g_mmpbsa_prepared = True

        self.state_data['energySoftware']['g_mmpbsa'].update({'frames': self.mdtraj_frames})
        self.state_data['energySoftware']['g_mmpbsa'].update({'prepare': self.g_mmpbsa_prepared})
        self.state_data['energySoftware']['g_mmpbsa'].update({'parts': self.mdtraj_parts})
        self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list})
        self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list})
        self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates)

        self.state_data['energySoftware']['g_mmpbsa'].update({'firstIndex': self.first_index})
        self.state_data['energySoftware']['g_mmpbsa'].update({'secondIndex': self.second_index})
        self.state_data['energySoftware']['g_mmpbsa'].update({'indexFile': self.index_file})
        self.state_data['energySoftware']['g_mmpbsa'].update({'mdpFile': self.mdp_file})
        self.state_data['energySoftware']['g_mmpbsa'].update({'tprFile': self.tpr_file})

        self.save_state_data_json()
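    # A minimal, dependency-free sketch of the chunking rule above: the
    # trajectory (here just a list of frame numbers) is cut into
    # ceil(n_frames / n_cores) sized contiguous slices, one per free core.
    @staticmethod
    def _example_split_frames(n_frames=100, n_cores=16):
        div = math.ceil(n_frames / n_cores)
        frames = list(range(n_frames))
        chunks = [frames[start:start + div]
                  for start in range(0, n_frames, div)]
        return chunks

    # _example_split_frames(10, 4) -> [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]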
    def prepare_for_dask_cluster(self, LeDock=2, rDock=2, FlexAid=2, Vina=2, parallel=False):
        '''
        Pack everything each worker needs (tpr, mdp, index files and the
        trajectory slices) into in-memory job payloads. The LeDock/rDock/
        FlexAid/Vina counts are leftovers from the docking variant of this
        class and are not used here.
        :return:
        '''
        import copy
        self.before_dask = copy.deepcopy(self.state_data)

        if self.g_mmpbsa_prepared is not True:
            print('Please prepare the g_mmpbsa protocol first')
            return

        full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa']

        # Shared input files are read once and shipped by memory
        tpr_abs = full_g_mmpbsa_data['tprFile']
        tpr_file = open(tpr_abs, 'rb')
        tpr_mem = tpr_file.read()
        tpr_file.close()
        tpr_filename = tpr_abs.split(os.sep)[-1]

        mdp_abs = full_g_mmpbsa_data['mdpFile']
        mdp_file = open(mdp_abs, 'r')
        mdp_mem = mdp_file.read()
        mdp_file.close()
        mdp_filename = mdp_abs.split(os.sep)[-1]

        index_abs = full_g_mmpbsa_data['indexFile']
        index_file = open(index_abs, 'r')
        index_mem = index_file.read()
        index_file.close()
        index_filename = index_abs.split(os.sep)[-1]

        data_pre = {'dask': {}}
        data_pre['dask'].update({'tprName': tpr_filename, 'tprMem': tpr_mem})
        data_pre['dask'].update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
        data_pre['dask'].update({'indexName': index_filename, 'indexMem': index_mem})
        self.dask_prep = data_pre

        run_g_mmpbsa = []
        for part_num in full_g_mmpbsa_data['parts']:
            data = self.state_data['energySoftware']['g_mmpbsa']['simStates'][str(part_num)]

            save_run_name = "g_mmpbsa_part_{0}".format(part_num)
            data.update({'Program': 'g_mmpbsa'})
            data.update({'part_num': part_num})
            data.update({'save_run_name': save_run_name})
            data.update({'dask': {}})

            traj_abs = data['absFileSave']
            traj_file = open(traj_abs, 'rb')
            traj_mem = traj_file.read()
            traj_file.close()
            traj_filename = data['fileSave']

            data['dask'].update({'tprName': tpr_filename})
            data['dask'].update({'mdpName': mdp_filename})
            data['dask'].update({'indexName': index_filename})
            data['dask'].update({'trajName': traj_filename})
            data['dask'].update({'trajMem': traj_mem,
                                 'tprMem': tpr_mem,
                                 'mdpMem': mdp_mem,
                                 'indexMem': index_mem})

            run_g_mmpbsa.append(data)

        # Keep only the parts that have not finished yet
        final_queue_job = []
        for pre_job in run_g_mmpbsa:
            if pre_job['runFinished'] is False:
                final_queue_job.append(pre_job)

        self.run_mmpbsa_dask = final_queue_job
        print('Finished preparing g_mmpbsa jobs for the dask cluster')
        print('---' * 10)
        print('\n')
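    # A minimal sketch of the payload idea used above: small input files
    # are read into memory once and attached to each job dict, so workers
    # need no shared filesystem. File names here are illustrative.
    @staticmethod
    def _example_pack_payload(tpr_path='topol.tpr', mdp_path='pbsa.mdp'):
        payload = {'dask': {}}
        with open(tpr_path, 'rb') as handle:   # binary file
            payload['dask']['tprMem'] = handle.read()
        with open(mdp_path, 'r') as handle:    # text file
            payload['dask']['mdpMem'] = handle.read()
        payload['dask']['tprName'] = tpr_path.split(os.sep)[-1]
        payload['dask']['mdpName'] = mdp_path.split(os.sep)[-1]
        return payload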
    def run_dask_docking(self, client=None, max_jobs_to_run=10):
        '''
        Round-robin job submission: cycle over the prepared queue and the
        available workers, submitting a part whenever a worker has enough
        free memory and fewer than max_jobs_to_run jobs are in flight.
        The method name is kept from the docking variant of this class.
        '''
        from molmolpy.tools import run_dask_tools

        curr_client = client

        worker_status = run_dask_tools.get_dask_worker_status(curr_client)
        get_worker_free = run_dask_tools.check_free_resources(worker_status)

        # This part runs the main program
        submitted_jobs = []
        submitted_jobs_dask = []

        queue_jobs = self.run_mmpbsa_dask
        job_quantity = len(queue_jobs)

        finished_jobs = []
        finished_jobs_dict = {}

        gmmbpsa_min_mem = 1000
        retries_num = 2

        curr_index = 0
        curr_worker = 0

        # prepare worker ids for easier switching
        worker_ids = {}
        for i, id in enumerate(get_worker_free):
            worker_ids.update({i: id})

        while len(queue_jobs) > 0:
            if curr_index >= len(queue_jobs):
                curr_index = 0
            if curr_worker >= len(worker_ids):
                curr_worker = 0

            curr_item = queue_jobs[curr_index]
            curr_item_prog = curr_item['Program']

            curr_worker_id = worker_ids[curr_worker]
            workstation_info = get_worker_free[curr_worker_id]
            workstation_preped = workstation_info['preped']
            workstation_address = workstation_preped['workerAddress']
            workstation_dir = workstation_preped['workerDir']
            workstation_freemem = workstation_preped['freeMemory']

            # keep fewer than max_jobs_to_run jobs in flight
            jobs_running = len(submitted_jobs_dask) - len(finished_jobs)
            max_jobus = max_jobs_to_run

            if curr_item_prog == 'g_mmpbsa':
                if workstation_freemem > gmmbpsa_min_mem and jobs_running < max_jobus:
                    print('Submit MMPBSA job to DASK')
                    pop_item = queue_jobs.pop(curr_index)

                    key_name = pop_item['save_run_name']
                    run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)
                    print('Cur run ', run_name)

                    if curr_index != 0:
                        curr_index -= 1

                    pop_item.update({'workingDir': workstation_dir})
                    submitted_jobs.append(pop_item)

                    # ship the payload to the chosen worker, then submit
                    big_future = curr_client.scatter(pop_item,
                                                     workers=[workstation_address])
                    task = curr_client.submit(run_dask_tools.run_gmmpbsa_using_dask,
                                              big_future,
                                              workers=[workstation_address],
                                              key=run_name,
                                              retries=retries_num)
                    submitted_jobs_dask.append(task)
                else:
                    print('Worker busy or low on memory, passing')

            # update indices and drain whatever has finished so far
            print('-------')
            curr_index += 1
            curr_worker += 1
            finished_jobs, finished_jobs_dict = self.check_dask_jobs(
                submitted_jobs_dask, finished_jobs, finished_jobs_dict)
            time.sleep(1)  # brief pause between scheduling passes

        # Last check of submitted jobs
        print('Last check of submitted jobs')
        while len(finished_jobs) != job_quantity:
            finished_jobs, finished_jobs_dict = self.check_dask_jobs(
                submitted_jobs_dask, finished_jobs, finished_jobs_dict)
            time.sleep(5)

        # allow CPU to cool down
        self.save_state_data_json(filedata=self.before_dask,
                                  filename=self.absolute_json_state_file)
        print('Finished load-balancing the g_mmpbsa queue')
        print('---' * 10)
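    # A self-contained sketch (assuming a reachable dask.distributed
    # scheduler at the given address) of the submit/collect pattern used by
    # run_dask_docking. The worker function here is a stand-in for
    # run_dask_tools.run_gmmpbsa_using_dask.
    @staticmethod
    def _example_dask_round_trip(scheduler_address='tcp://127.0.0.1:8786'):
        from distributed import Client, as_completed

        def fake_mmpbsa_part(job):
            # pretend to score one trajectory slice
            return {job['save_run_name']: {'runFinished': True}}

        client = Client(scheduler_address)
        jobs = [{'save_run_name': 'g_mmpbsa_part_{0}'.format(i)}
                for i in range(4)]
        futures = [client.submit(fake_mmpbsa_part, job, retries=2)
                   for job in jobs]
        results = {}
        for future in as_completed(futures):
            results.update(future.result())
        client.close()
        return results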
    def check_dask_jobs(self, submitted_jobs_dask, finished_jobs, finished_jobs_dict):
        '''
        Poll the submitted dask futures; for every newly finished part
        write the in-memory result files back into the run folder and mark
        the part as finished in the saved state.
        '''
        for i, job in enumerate(submitted_jobs_dask):
            try:
                if finished_jobs_dict[i] is True:
                    continue
            except KeyError:
                pass

            if job.status != 'finished':
                continue

            finished_jobs.append(job)
            finished_jobs_dict.update({i: True})

            results = job.result()
            update_results = results

            for key in results:
                prog = results[key]['Program']
                # need the part number to map back to the saved state
                sample_num = results[key]['part_num']
                results_dask = results[key]['dask']

                original_data = self.before_dask['energySoftware'][prog]['simStates'][str(sample_num)]
                abs_folder = original_data['absFolder']

                out_name = abs_folder + os.sep + results_dask['out_filename']
                out_mem = results_dask['out_mem']
                out_file = open(out_name, 'w')
                out_file.write(out_mem)
                out_file.close()

                # the per-part g_mmpbsa outputs come back the same way
                for stem in ('energyMM', 'polar', 'apolar',
                             'contribMM', 'contrib_pol', 'contrib_apol'):
                    out_name = abs_folder + os.sep + results_dask[stem + '_filename']
                    out_mem = results_dask[stem + '_mem']
                    out_file = open(out_name, 'w')
                    out_file.write(out_mem)
                    out_file.close()

                self.before_dask['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]

        print('Finished checking dask submissions ---\n')
        print('---' * 10)
        return finished_jobs, finished_jobs_dict
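    # A sketch of the polling idiom in check_dask_jobs: distributed
    # futures expose .done()/.status, so finished work can be drained
    # without blocking on the stragglers. The futures list is illustrative.
    @staticmethod
    def _example_drain_finished(futures, finished_jobs, finished_jobs_dict):
        for i, future in enumerate(futures):
            if finished_jobs_dict.get(i) is True:
                continue
            if future.done():
                finished_jobs.append(future)
                finished_jobs_dict[i] = True
        return finished_jobs, finished_jobs_dict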
    ##############################################################################################
    # FlexAid helpers (carried over from the docking variant of this class)

    def flexaid_generate_ga_dat_parameters_dask(self):
        '''
        Generate GA dat parameters for flexaid docking
        :return:
        '''
        self.flexaid_ga_dat_param_template = '''# Number of chromosomes (number individuals in the population)
# Integer in interval [1-N]
NUMCHROM 500

# Number of generations
# Integer in interval [1-N]
NUMGENER 500

# Use Adaptive Genetic-Algorithm
# Value of 0 or 1
ADAPTVGA 1

# Adaptive crossover/mutation parameters
# Floats in interval [0.0,1.0]
ADAPTKCO 0.95 0.10 0.95 0.10

# Constant crossover probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
CROSRATE 0.90

# Constant mutation probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
MUTARATE 0.10

# Crossover operator
# Intragenic crossovers are possible
INTRAGEN

# Specifies that the initial population is generated randomly
POPINIMT RANDOM

# Fitness function
# Values in [LINEAR,PSHARE]
FITMODEL PSHARE

# Parameters of the shared fitness function
SHAREPEK 5.0
SHARESCL 10.0

# Reproduction model
# Values in [BOOM,STEADY]
REPMODEL BOOM

# Fraction of population to create
# Only considered when REPMODEL is BOOM
BOOMFRAC 1.0

# Number of new individuals to generate at each generation
# Only considered when REPMODEL is STEADY
# Integer in interval [1,N-1] where N is NUMCHROM
STEADNUM 950

# Number of TOP individuals to print in console
# Integer in interval [1,N] where N is NUMCHROM
PRINTCHR 10
'''
        self.generate_ga_dat_pameters = True
        self.generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat'
        self.generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep + self.generate_ga_dat

        self.generate_ga_dat_object_file = open(self.generate_ga_dat_name_abs, 'w')
        self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template)
        self.generate_ga_dat_object_file.close()

        self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}})
        self.state_data['dockSoftware']['FlexAid']['GA_params'].update(
            {'generateGA_param': self.generate_ga_dat_pameters,
             'GA_DataName': self.generate_ga_dat,
             'GA_DATA_Abs': self.generate_ga_dat_name_abs,
             'GA_ParamFull': self.flexaid_ga_dat_param_template})
        self.save_state_data_json()

        return self.generate_ga_dat, self.flexaid_ga_dat_param_template

    ##############################################################################################

    def flexaid_generate_config_input_dask(self):
        '''
        Generate the FlexAid CONFIG input file.
        FlexAid is very strict about spaces.
        :return:
        '''
        flexaid_config_input_template = '''# Optimization method (genetic-algorithms)
METOPT GA

# The variation in degrees for the anchor angle of the ligand
# Float in interval [1.0-30.0]
VARANG 5.0

# The variation in degrees for the anchor dihedral of the ligand
# Float in interval [1.0-30.0]
VARDIH 5.0

# The variation in degrees for the flexible dihedrals of the ligand
# Float in interval [1.0-30.0]
VARFLX 10.0

# Use Vcontacts in the calculations of surfaces in contact
COMPLF VCT

# Do not consider intramolecular interactions
NOINTR

# Side-chain rotamer acceptance threshold
# Use observed rotamers rather than using the Penultimate Rotamer Library
#ROTOBS

# Defines the grid spacing of the binding-site cleft
# Float in interval [0.1,1.0]
SPACER 0.375

# Exclude hetero groups in the target (water,metal,modified amino acids,cofactors,ligands)
# To exclude these groups, uncomment the next line
#EXCHET

# Include water molecules in the target (always removed by default)
# Only considered if EXCHET is disabled
# To include water molecules, uncomment the next line
#INCHOH

# Permeability allowed between atoms
# Float in interval [0.0,1.0] from fully permeable to no permeability
PERMEA 0.9

# Permeability for side-chain rotamers
# Float in interval [0.0,1.0] from fully permeable to no permeability
ROTPER 0.8

# Solvent term penalty
# When the value is 0.0 the solvent interactions are derived from the interaction matrix
# Float in interval [-200.0,200.0]
SLVPEN 0.0

# Use Vcontacts indexing
VINDEX

# Vcontacts plane definition
# Value in [B,R,X] for Bissecting, Radical and Extended radical plane

# Use normalized surfaces in contacts
NORMAR

# Define the RMSD cutoff between clusters
# Float in interval [0.5,3.0]
CLRMSD 2.0

# Number of results/docking poses to output
MAXRES 20

# Only output scored atoms in the final results
# Comment the next line if you wish to obtain the whole complex
SCOOUT

# Only calculate the CF for ligand atoms despite including flexible side-chains
#SCOLIG

# Ends reading of CONFIG file
ENDINP
'''

        final_str = ''''''

        # Specify the processed target file to use
        pdbnam = 'PDBNAM ' + '{0}\n\n'.format(self.receptor_flexaid_mol2)

        # Specify the processed ligand file to use
        # BTN.inp has the unique RESNUMC identifier LIG9999A
        inplig = 'INPLIG ' + '{0}.inp\n\n'.format(self.ligand_flexaid_initials)

        # Specify to use one or multiple cleft(s) as binding-site
        rgnopt_locclf = 'RNGOPT LOCCLF ' + 'global_binding_site.pdb\n\n'

        # Degrees of freedom (DOF) of the processed ligand with residue
        # number 9999 and its chain
        # Translational DOF of the ligand (-1)
        optimz1 = 'OPTIMZ 9999 {0} -1\n\n'.format(self.flexaid_res_chain)
        # Rotational DOF of the ligand (0)
        optimz2 = 'OPTIMZ 9999 {0} 0\n\n'.format(self.flexaid_res_chain)

        # Add one extra line for each flexible bond of the ligand
        # The allowable flexible bonds are listed as FLEDIH lines in Processed_files/BTN.inp
        # In our example, Biotin has 5 flexible bonds
        flexible_bonds_data = open(
            self.flexaid_absolute_processed_files_folder
            + os.sep + '{0}.inp'.format(self.ligand_flexaid_initials), 'r')
        flexible_bonds_data_text = flexible_bonds_data.read()
        flexible_bonds_data.close()

        flexible_bonds_data_text_list = flexible_bonds_data_text.split('\n')

        flexible_index_list_phrases = []
        flexible_index_list = []
        for i in flexible_bonds_data_text_list:
            if 'FLEDIH' in i:
                print(i)
                temp = i.split(' ')
                print(temp)
                flex_index = temp[-2]
                flexible_index_list.append(int(flex_index))
                temp_line = 'OPTIMZ 9999 {0} {1}\n'.format(self.flexaid_res_chain,
                                                           flex_index)
                flexible_index_list_phrases.append(temp_line)

        final_str += pdbnam
        final_str += inplig
        final_str += rgnopt_locclf
        final_str += optimz1
        final_str += optimz2
        for y in flexible_index_list_phrases:
            final_str += y
        final_str += '\n'

        # Reference structure for RMSD calculations (file name assumed here)
        rmsdst = 'RMSDST ' + '{0}_ref.pdb\n\n'.format(self.ligand_flexaid_initials)
        final_str += rmsdst

        final_str += flexaid_config_input_template

        generate_config_input_file = 'CONFIG_' + self.receptor_name + '-' + self.ligand_name + '.inp'
        return generate_config_input_file, final_str
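    # A standalone sketch of the FLEDIH parsing above. FlexAid '.inp' files
    # list one 'FLEDIH' line per flexible bond; splitting on single spaces
    # makes the bond index the second-to-last token because the lines carry
    # a trailing space. The sample lines below are illustrative.
    @staticmethod
    def _example_parse_fledih(lines=('FLEDIH 4 ', 'FLEDIH 11 ')):
        flexible_indices = []
        for line in lines:
            if 'FLEDIH' in line:
                tokens = line.split(' ')   # trailing space -> last token is ''
                flexible_indices.append(int(tokens[-2]))
        return flexible_indices

    # _example_parse_fledih() -> [4, 11]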
    ##############################################################################################
    # LeDock / Vina helpers (carried over from the docking variant)

    def prepare_ledock_settings(self):
        '''
        Copy receptor/ligand into the LeDock folder and process the
        receptor for docking using lepro.
        :return:
        '''
        if len(self.ledock_directories) == 0:
            print('Creating LeDock folder in uberDocker folder \n')
            print(self.ledock_directories)
            folder_utils.create_folder(self.ledock_absolute_folder_name)

        self.receptor_ledock_pdb = "{0}.pdb".format(self.receptor_name)
        self.ligand_ledock_mol2 = "{0}.mol2".format(self.ligand_name)

        self.absolute_receptor_ledock_pdb = self.ledock_absolute_folder_name + os.sep + self.receptor_ledock_pdb
        self.absolute_ligand_ledock_mol2 = self.ledock_absolute_folder_name + os.sep + self.ligand_ledock_mol2

        self.receptor_pybel.write("pdb", self.absolute_receptor_ledock_pdb, overwrite=True)
        self.ligand_pybel.write("mol2", self.absolute_ligand_ledock_mol2, overwrite=True)
        self.ledock_folder_exists = True

        # Enter the LeDock folder and process the structure with lepro:
        # ./lepro_linux_x86 LasR_flexaid.pdb
        os.chdir(self.ledock_absolute_folder_name)
        command_receptor = self.ledock_path + os.sep + 'lepro_linux_x86' + ' {0} '.format(self.receptor_ledock_pdb)
        os.system(command_receptor)
        self.lepro_pdb_file = 'pro.pdb'
        # TODO check whether lepro ran fine
        print('Updated with LePro\n')
        os.chdir(self.uber_dock_folder)

        self.state_data['dockSoftware']['LeDock'].update(
            {'receptor_pdb': self.receptor_ledock_pdb,
             'ligand_mol2': self.ligand_ledock_mol2,
             'lepro_pdb': self.lepro_pdb_file,
             'lepro_abs_pdb': self.ledock_absolute_folder_name + os.sep + self.lepro_pdb_file,
             'receptor_abs_pdb': self.absolute_receptor_ledock_pdb,
             'ligand_abs_mol2': self.absolute_ligand_ledock_mol2,
             'LeDockFolderStatus': self.ledock_folder_exists,
             'LeDockAbsFolder': self.ledock_absolute_folder_name,
             'LeDockFolderName': self.ledock_folder_name})
        self.save_state_data_json()
        self.load_state_called = False

        self.ledock_title = self.receptor_name + '_' + self.ligand_name + ' Docking Parameter file'
        self.ledock_rmsd = 0.5
        self.set_up_ledock_dock_blind_parameters(title=self.ledock_title,
                                                 receptor_file=self.lepro_pdb_file,
                                                 ledock_rmsd=self.ledock_rmsd,
                                                 x_center=self.x_center,
                                                 y_center=self.y_center,
                                                 z_center=self.z_center)

    @hlp.timeit
    def prep_LeDock_dock_run_commands(self, num_samples=10):
        '''
        Prepare the LeDock run commands and save them to the state json.
        :param num_samples: test value 6
        :return:
        '''
        self.ledock_samples = list(range(1, num_samples + 1))
        self.LeDock_sim_states = {'simStates': {}}
        self.LeDock_command_run_list = []
        try:
            self.LeDock_sim_states = self.state_data['dockSoftware']['LeDock']['simStates']
            self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list']
            print('No need to generate LeDock commands')
        except KeyError:
            self.state_data['dockSoftware']['LeDock'].update({'LeDockSample_list': self.ledock_samples})
            self.state_data['dockSoftware']['LeDock'].update(self.LeDock_sim_states)
            for sample_num in self.ledock_samples:
                self.prep_LeDock_dock_command(sample_num)
            print('Now continue for LeDock :D')
            self.save_state_data_json()
        self.prep_LeDock_run = True

    @hlp.timeit
    def prep_LeDock_dock_command(self, sample_num, pose_gen=20):
        '''
        Prepare one LeDock run command, i.e. generate 20 poses.
        :param sample_num:
        :param pose_gen: number of poses to generate
        :return:
        '''
        if self.setup_ledock_pameters is not False:
            command_receptor = self.ledock_path + os.sep + 'ledock_linux_x86'
            sample_data = self.ledock_input_info[str(sample_num)]
            parm_name = sample_data['ledock_parm_name']

            self.save_run_name = "ledock_{0}_sample_{1}".format(self.run_type, sample_num)
            random_seed = np.random.randint(low_seed, high_seed)

            command_to_run = "{0} {1}".format(command_receptor, parm_name)
            ligand_clear_dok = sample_data['ligand_clear_name'] + '.dok'
            # ./ledock_linux_x86 -spli MOR_flexaid.dok
            command_to_clean = "{0} -spli {1}".format(command_receptor, ligand_clear_dok)
            print(command_to_run)
            self.LeDock_command_run_list.append(command_to_run)
            print("Launching new Sim")

            temp_dict = {str(sample_num): {'save_run_name': self.save_run_name,
                                           'commandRun': command_to_run,
                                           'commandToClean': command_to_clean,
                                           'dokFileName': ligand_clear_dok,
                                           'runFinished': False}}
            self.LeDock_sim_states.update(temp_dict)
            self.state_data['dockSoftware']['LeDock']['simStates'].update(temp_dict)
            print("LeDock command generation finished")
        else:
            print('Please setup LeDock parameters')

    def prepare_samples_collection_run(self, standard_exhaust=128,
                                       num_samples_run=100,
                                       run_type='samples_run'):
        if self.setup_box is False:
            print('Please setup simulation box')
            sys.exit(0)

        self.run_type_samples = run_type
        self.prep_samples_run = True
        self.samples_exhaust = standard_exhaust
        self.samples_run = list(range(1, num_samples_run + 1))

        self.run_folder_name_samples = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples
        self.sim_folder_run_samples = self.folder_path + os.sep + self.run_folder_name_samples

        # Create folder don't forget
        self.directories_samples = folder_utils.find_folder_in_path(
            self.folder_path, self.run_folder_name_samples)
        print('TADA ', self.directories_samples)

        # This will hold information about run states
        self.json_samples_state_file = self.sim_folder_run_samples + os.sep + \
            self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples + '.json'

        if len(self.directories_samples) == 0:
            print('Creating folder for vina samples run\n')
            folder_utils.create_folder(self.sim_folder_run_samples)
            self.folder_exists = True

            self.state_data_samples.update({'receptorFile': self.receptor_file,
                                            'ligandFile': self.ligand_file,
                                            'exhaustivenessList': self.exhaustiveness,
                                            'samples_exhaust': self.samples_exhaust,
                                            'samplesList': self.samples_run,
                                            'folderPath': self.folder_path,
                                            'runType': self.run_type_samples,
                                            'molName': self.molecule_name,
                                            'receptorName': self.receptor_name,
                                            'simRunFolder': self.sim_folder_run_samples,
                                            'directory': self.directories_samples,
                                            'setup': self.setup_box,
                                            'folderCreated': self.folder_exists,
                                            'boxSettings': {'center_x': self.x_center,
                                                            'center_y': self.y_center,
                                                            'center_z': self.z_center,
                                                            'size_x': self.x_size,
                                                            'size_y': self.y_size,
                                                            'size_z': self.z_size,
                                                            'numModes': self.num_modes},
                                            'simStates': {}})

            self.prepVinaSim_samples()
            self.save_state_data_json(filedata=self.state_data_samples,
                                      filename=self.json_samples_state_file)
            self.load_state_called_samples = False
            self.prep_sample_run = True
        else:
            self.load_state_file_samples = self.json_samples_state_file
            self.load_state_called_samples = True
            self.load_samples_state_data_json(self.load_state_file_samples)
            self.prep_sample_run = True

    @hlp.timeit
    def prepVinaSim_samples(self):
        # build one Vina command per sample
        for sample_num in self.samples_run:
            self.prepVinaSampleCommand(sample_num)

    @hlp.timeit
    def prepVinaSampleCommand(self, sample_num):
        try:
            if self.setup_box is not False:
                # TODO need to think about seed
                self.save_run_name = "vina_{0}_sample_{1}".format(self.run_type_samples, sample_num)
                random_seed = np.random.randint(low_seed, high_seed)

                command_to_run = "vina --receptor {0} " \
                                 "--ligand {1} " \
                                 "--center_x {2} " \
                                 "--center_y {3} " \
                                 "--center_z {4} " \
                                 "--size_x {5} " \
                                 "--size_y {6} " \
                                 "--size_z {7} " \
                                 "--exhaustiveness {8} " \
                                 "--num_modes {9} " \
                                 "--out {10}_out.pdbqt " \
                                 "--log {11}.log".format(self.receptor_file,
                                                         self.ligand_file,
                                                         self.x_center,
                                                         self.y_center,
                                                         self.z_center,
                                                         self.x_size,
                                                         self.y_size,
                                                         self.z_size,
                                                         self.samples_exhaust,
                                                         self.num_modes,
                                                         self.save_run_name,
                                                         self.save_run_name)
                print(command_to_run)
                self.command_samples_run_list.append(command_to_run)
                print("Launching new Sim")
                self.state_data_samples['simStates'].update(
                    {str(sample_num): {'save_run_name': self.save_run_name,
                                       'commandRun': command_to_run,
                                       'runFinished': False}})
                print("Vina sample run command prep finished")
            else:
                print('Please setup vina box settings')
        except Exception as e:
            print("error in runSim: ", e)
            sys.exit(0)
self.ligand_pybel.write(\"mol2\", self.absolute_ligand_ledock_mol2, overwrite=True) self.ledock_folder_exists = True test =", "modified_submitted_jobs_dask.pop(i) try: if finished_jobs_dict[i] is True: continue except Exception as error: pass finished_jobs.append(job)", "# Value in [B,R,X] for Bissecting, Radical and Extended radical plane # See", "Saves state file :return: ''' # import json # with open(filename, 'w') as", "prep_LeDock_dock_command(self, sample_num, pose_gen=20): ''' prepare each separate rDock run command :param sample_num: :param", "+ 'ledock_linux_x86' sample_data = self.ledock_input_info[str(sample_num)] parm_name = sample_data['ledock_parm_name'] test = 1 self.save_run_name =", "= 1 # test = 1 run_mmpbsa_queue = run_g_mmpbsa # run_docking_queue = run_docking_LeDock", "i) except Exception as error: print('error is ', error) # print('i is ',", "20 poses :return: ''' try: if self.setup_ledock_pameters is not False: # print(\"Running Vina\")", "test = 1 run_mmpbsa_queue = run_g_mmpbsa # run_docking_queue = run_docking_LeDock + run_docking_FlexAid +", "os.chdir('HSL_exhaustiveness') self.receptor_file = self.state_data_samples['receptorFile'] self.ligand_file = self.state_data_samples['ligandFile'] self.exhaustiveness = self.state_data_samples['exhaustivenessList'] self.samples_run = self.state_data_samples['samplesList']", "# ------------------------------------------------------------------------ --> import itertools import time color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',", "abs_folder + os.sep + results_dask['out_filename'] out_mem = results_dask['out_mem'] out_file = open(out_name, 'w') out_file.write(out_mem)", "# # test = 1 # # test = 1 ################################################################################################### test =", "self.sim_folder_run + os.sep + self.receptor_name + '_' + self.molecule_name + '_' + self.run_type", "contact COMPLF VCT # Do not consider intramolecular interactions NOINTR # Side-chain rotamer", "in flexible_index_list_phrases: final_str += y final_str += '\\n' rmsdst = 'RMSDST ' +", "self.prep_samples_run = True self.samples_exhaust = standard_exhaust self.samples_run = list(range(1, num_samples_run + 1)) self.run_folder_name_samples", "test = 1 # ############################################################################################### # # # work_address = workstation1_preped['workerAddress'] # #", "LeDock:D') self.save_state_data_json() test = 1 self.prep_LeDock_run = True @hlp.timeit def prep_LeDock_dock_command(self, sample_num, pose_gen=20):", "print(os.path.abspath(__file__)) self.state_data_samples = json.load(open(filename, \"r\")) # os.chdir('HSL_exhaustiveness') self.receptor_file = self.state_data_samples['receptorFile'] self.ligand_file = self.state_data_samples['ligandFile']", "self.mdp_file = mdp_file self.index_file = index_file self.first_index = first_index self.second_index = second_index self.prep_g_mmpbsa_run", "i) print('Finished checking dask submissions ---\\n') print('---' * 10) return finished_jobs, finished_jobs_dict #", "True: full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa'] test = 1 tpr_abs= full_g_mmpbsa_data['tprFile'] tpr_file = open(tpr_abs, 'rb')", "= open(tpr_abs, 'rb') tpr_mem = tpr_file.read() tpr_filename = tpr_abs.split(os.sep)[-1] # mdp_abs= full_g_mmpbsa_data['mdpFile'] mdp_file", "# Free core approach div_traj = math.ceil(traj_len/total_free_cores) # select_indexes = list(range(total_free_cores)) # Maximum", "try: # 
self.LeDock_sim_states = self.state_data['dockSoftware']['LeDock']['simStates'] # self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list'] # print('No need to", "self.run_folder_name) print('TADA ', self.directories) self.json_state_file = self.sim_folder_run + os.sep + self.receptor_name + '_'", "grid spacing of the binding-site # Float in interval [0.1,1.0] SPACER 0.375 #", "while len(finished_jobs) != job_quantity: finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask, finished_jobs, finished_jobs_dict) time.sleep(60) print('->' *", "# Only considered when REPMODEL is STEADY # Integer in interval [1,N-1] where", "+ '.json' # This will hold information about run states if len(self.directories_samples) ==", "'simRunFolder': self.sim_folder_run_samples, 'directory': self.directories_samples, 'setup': self.setup_box, 'folderCreated': self.folder_exists_samples, 'simStates': {}}) self.prepVinaSim_samples() self.save_state_data_json(filedata=self.state_data_samples, filename=self.json_samples_state_file)", "+ os.sep + results_dask['out_filename'] out_mem = results_dask['out_mem'] out_file = open(out_name, 'w') out_file.write(out_mem) out_file.close()", "# self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type) self.ledock_directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.ledock_folder_name) print('TADA ', self.ledock_directories) test", "should I add json saving of information or not? def load_samples_state_data_json(self, filename): '''", "\" \\ \"--center_y {3} \" \\ \"--center_z {4} \" \\ \"--size_x {5} \"", "for docking using lepro # ./lepro_linux_x86 LasR_flexaid.pdb os.chdir(self.ledock_absolute_folder_name) command_receptor = self.ledock_path + os.sep", "import math # Free core approach div_traj = math.ceil(traj_len/total_free_cores) # select_indexes = list(range(total_free_cores))", "'ligand_mol2': self.ligand_ledock_mol2, 'lepro_pdb': self.lepro_pdb_file, 'lepro_abs_pdb': self.ledock_absolute_folder_name + os.sep + self.lepro_pdb_file, 'abs_receptor_pdb': self.absolute_receptor_ledock_pdb, 'abs_ligand_mol2':", "[0.0,1.0] from fully permeable to no permeability ROTPER 0.8 # Solvent term penalty", "= temp[-2] flexible_index_list.append(int(flex_index)) temp_line = 'OPTIMZ {0} {1} {2}\\n'.format(self.flexaid_res_number, self.flexaid_res_chain, flex_index) flexible_index_list_phrases.append(temp_line) test", "#self.g_mmpbsa_sim_states = self.state_data['energySoftware']['g_mmpbsa']['simStates'] #self.ledock_samples = self.state_data['energySoftware']['g_mmpbsa']['LeDockSample_list'] # Divide trajectory to number of free", "self.molecule_name + '.json' if filename is None and filedata is None: # filename", "of the molmolpy Developers nor the names of any # contributors may be", "# self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates) except: print('G_mmpbsa is empty verify yolo') # # test = 1", "+ results_dask['energyMM_filename'] out_mem = results_dask['energyMM_mem'] out_file = open(out_name, 'w') out_file.write(out_mem) out_file.close() out_name =", "self.prepare_uber_docker() # self.prepare_ledock_settings() # self.prep_LeDock_dock_run_commands() @hlp.timeit def prep_LeDock_dock_run_commands(self, num_samples=10): ''' Prepare rdock run", "if self.g_mmpbsa_prepared is True: full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa'] test = 1 tpr_abs= full_g_mmpbsa_data['tprFile'] tpr_file", "self.ledock_folder_name = self.receptor_name + '_' + 
self.molecule_name + '_' + 'LeDock' self.ledock_absolute_folder_name =", "= client.submit(run_dask_tools.upload_g_mmpbsa_files_dask, # big_future, # workers=[worker_address], # key='key_scatter_{0}'.format(worker_address), # retries=retries_num) # tasks_upload.append(task) #", "' + 'global_binding_site.pdb\\n\\n' # Specify the degrees of freedom (DOF) of the processed", "receptor_file=self.lepro_pdb_file, ledock_rmsd=self.ledock_rmsd, x_center=self.x_center, y_center=self.y_center, z_center=self.z_center) else: print('state has beeen loaded \\n') ############################################################################## def", "'receptorName': self.receptor_name, 'simRunFolder': self.sim_folder_run_samples, 'directory': self.directories_samples, 'setup': self.setup_box, 'folderCreated': self.folder_exists_samples, 'simStates': {}}) self.prepVinaSim_samples()", "tpr_filename}) data['dask'].update({'mdpName': mdp_filename}) data['dask'].update({'indexName': index_filename}) data['dask'].update({'trajMem':traj_mem, 'trajName':traj_filename}) data['dask'].update({'tprName': tpr_filename, 'tprMem': tpr_mem}) data['dask'].update({'mdpName': mdp_filename,", "TODO enter ledock folder and process structure for docking using lepro # ./lepro_linux_x86", "len(submitted_jobs_dask) - len(finished_jobs) max_jobus = max_jobs_to_run # g_mmpbsa part if curr_item_prog == 'g_mmpbsa':", "out_mem = results_dask['contrib_apol_mem'] out_file = open(out_name, 'w') out_file.write(out_mem) out_file.close() out_name = abs_folder +", "index_abs= full_g_mmpbsa_data['indexFile'] index_file = open(index_abs, 'r') index_mem = index_file.read() index_filename = index_abs.split(os.sep)[-1] #", "CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", "# self.ledock_xmax = self.ledock_data['LeDock_params']['xmax'] # self.ledock_ymin = self.ledock_data['LeDock_params']['ymin'] # self.ledock_ymax = self.ledock_data['LeDock_params']['ymax'] #", "# Use Vcontacts indexing VINDEX # Vcontacts plane definition # Value in [B,R,X]", "self.mdtraj_parts}) self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list}) self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list}) self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates) self.state_data['energySoftware']['g_mmpbsa'].update({'firstIndex': self.first_index}) self.state_data['energySoftware']['g_mmpbsa'].update({'secondIndex': self.second_index}) self.state_data['energySoftware']['g_mmpbsa'].update({'indexFile': self.index_file}) self.state_data['energySoftware']['g_mmpbsa'].update({'mdpFile':", "the processed target file to use pdbnam = 'PDBNAM ' + '{0}\\n\\n'.format( self.receptor_flexaid_mol2)", "conformers rather than using the Penultimate Rotamer Library #ROTOBS # Defines the grid", "filenames in os.walk(folder): for i in filenames: # print i if 'out' in", "= original_get_worker_free[curr_worker_id]['preped']['workerDir'] workstation_freemem = workstation_preped_temp['freeMemory'] workstation_freecpu = workstation_preped_temp['freeCores'] curr_item_prog = curr_item['Program'] ############################################################ #", "fine print('Updated receptor with LePro\\n') os.chdir(self.uber_dock_folder) self.state_data['dockSoftware']['LeDock'].update( {'receptor_pdb': self.receptor_ledock_pdb, 'ligand_mol2': self.ligand_ledock_mol2, 'lepro_pdb': self.lepro_pdb_file,", "this works need to create a quiiee # # retries_num = 2 #", "finished_jobs_dict # 
@hlp.timeit def run_dask_gmmpbsa(self, client=None, max_jobs_to_run=10): # from molmolpy.moldock import run_dask_tools from", "self.ledock_data = self.state_data['dockSoftware']['LeDock'] # test = 1 # # # Try to load", "= 1 run_g_mmpbsa.append(data) # # result = run_dock_tools.run_LeDock_sim_parallel(LeDock_sample_num, data) # # test =", "include water molecules, uncomment the next line #INCHOH # Permeability allowed between atoms", "filedata filename = filename json.dump(filedata, open(filename, \"w\"), sort_keys=True, indent=4) # TODO should I", "high_seed = 999999999 mgltools_utilities = '/home/john1990/MGLTools-1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24' class GMMPBSAObject(object): \"\"\" Usage example >>> EPI_folder", "slow # while len(run_docking_queue) != 40: # run_docking_queue += run_docking_LeDock[curr_LeDock:curr_LeDock + LeDock] #", "are derived from the interaction matrix # Float in interval [-200.0,200.0] SLVPEN 0.0", "self.run_folder_name_samples def save_state_data_json(self, filedata=None, filename=None): ''' :param filename: Saves state file :return: '''", "folder don't forget # self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type) self.directories = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name) print('TADA", "= self.receptor_name + '_' + self.ligand_name + '_LeDock Parameter file' self.ledock_rmsd = 0.5", "# Integer in interval [1-N] NUMGENER 500 # Use Adaptive Genetic-Algorithm # Value", "== 0: curr_index = 0 else: curr_index -= 1 pop_item.update({'workingDir':workstation_dir}) submitted_jobs.append(pop_item) # MAYBE", "# # data, # # workers=[work_address], # # key='key_test', # # retries=retries_num) #", "= mdp_abs.split(os.sep)[-1] index_abs= full_g_mmpbsa_data['indexFile'] index_file = open(index_abs, 'r') index_mem = index_file.read() index_filename =", "of the ligand # Float in interval [1.0-30.0] VARFLX 10.0 # Use Vcontacts", "''' self.flexaid_ga_dat_param_template = '''# Number of chromosomes (number individuals in the population) #", "[0.1,1.0] SPACER 0.375 # Exclude hetero groups in the target (water,metal,modified amino acids,cofactors,ligands)", "= queue_jobs[curr_index] test = 1 curr_worker_id = worker_ids[curr_worker] workstation_info_temp = get_worker_free_temp[curr_worker_id] workstation_preped_temp =", "in os.walk(folder): for i in filenames: # print i if 'out' in i:", "# #big_future = client.scatter(pop_item, workers=[workstation_address], hash=False) big_future = pop_item task_g_mmpbsa = client.submit(run_dask_tools.run_gmmpbsa_using_dask, big_future,", "curr_index == 0: curr_index = 0 else: curr_index -= 1 pop_item.update({'workingDir':workstation_dir}) submitted_jobs.append(pop_item) #", "= self.state_data.copy() # # self.save_state_data_json() # TODO this part needs to be thought", "results[key]['part_num'] if prog == 'g_mmpbsa': sample_num = results[key]['part_num'] results_dask = results[key]['dask'] original_data =", "curr_item = queue_jobs[curr_index] test = 1 curr_worker_id = worker_ids[curr_worker] workstation_info_temp = get_worker_free_temp[curr_worker_id] workstation_preped_temp", "Pagadala Software for molecular docking: a review # This will be for leDock", "traj_abs = data['absFileSave'] traj_file = open(traj_abs, 'rb') traj_mem = traj_file.read() traj_filename = data['fileSave']", "and filedata is None: # # filename = self.json_state_file # filename = self.absolute_json_state_file", "receptor_name=receptor_name) >>> >>> >>> EPI_uber_dock.prepare_uber_dock_protocol() >>> 
EPI_uber_dock.run_uber_dock_protocol() Use together >>> self.prepare_uber_dock_protocol() for preparation", "run_docking_queue += run_docking_rDock[curr_rDock:curr_rDock + rDock] # curr_rDock += rDock # # run_docking_queue +=", "'LeDockAbsFolder': self.ledock_absolute_folder_name, 'LeDockFolderName': self.ledock_folder_name}) self.save_state_data_json() self.load_state_called = False self.ledock_title = self.receptor_name + '_'", "self.state_data['dockSoftware']['FlexAid']['GA_params'].update( # {'generateGA_param': self.generate_ga_dat_pameters, # 'GA_DataName': self.generate_ga_dat, # 'GA_DATA_Abs': self.generate_ga_dat_name_abs, # 'GA_ParamFull': self.flexaid_ga_dat_param_template})", "generate_config_input_file = 'CONFIG_' + self.receptor_name + '-' + self.ligand_name + '.inp' return generate_config_input_file,", "molmolpy.utils import helper as hlp # matplotlib.style.use('ggplot') sns.set(style=\"darkgrid\") low_seed = 1 high_seed =", "current_pid = multiprocessing.current_process().pid print(\"Main Process with PID:{}\".format(current_pid)) # free_threads_for_Vina = num_threads - LeDock-rDock-FlexAid", "big_future = pop_item task_g_mmpbsa = client.submit(run_dask_tools.run_gmmpbsa_using_dask, big_future, workers=[workstation_address], key=run_name, retries=retries_num) submitted_jobs_dask.append(task_g_mmpbsa) else: key_name", "in interval [0.0,1.0] ADAPTKCO 0.95 0.10 0.95 0.10 # Constant crossover probability #", "dir_name=self.run_type) self.directories_samples = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name_samples) print('TADA ', self.directories_samples) self.json_samples_state_file = self.sim_folder_run_samples + os.sep", "Add receptor name def set_mgltools_path(self, path): print('MGLTools path is set to ', path)", "os.sep + results_dask['apolar_filename'] out_mem = results_dask['apolar_mem'] out_file = open(out_name, 'w') out_file.write(out_mem) out_file.close() out_name", "self.load_state_file = self.json_state_file self.load_state_called = True self.load_state_data_json(self.load_state_file) def prepare_ledock_settings(self): ''' Prepare ultraDock folder", "# {'generateGA_param': self.generate_ga_dat_pameters, # 'GA_DataName': self.generate_ga_dat, # 'GA_DATA_Abs': self.generate_ga_dat_name_abs, # 'GA_ParamFull': self.flexaid_ga_dat_param_template}) #", "hlp # matplotlib.style.use('ggplot') sns.set(style=\"darkgrid\") low_seed = 1 high_seed = 999999999 mgltools_utilities = '/home/john1990/MGLTools-1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24'", "self.state_data['energySoftware']['g_mmpbsa'].update({'parts': self.mdtraj_parts}) self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list}) self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list}) self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates) self.state_data['energySoftware']['g_mmpbsa'].update({'firstIndex': self.first_index}) self.state_data['energySoftware']['g_mmpbsa'].update({'secondIndex': self.second_index}) self.state_data['energySoftware']['g_mmpbsa'].update({'indexFile': self.index_file})", "self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}}) self.state_data['dockSoftware']['FlexAid']['GA_params'].update( {'generateGA_param': self.generate_ga_dat_pameters, 'GA_DataName': self.generate_ga_dat, 'GA_DATA_Abs': self.generate_ga_dat_name_abs, 'GA_ParamFull': self.flexaid_ga_dat_param_template}) # self.state_data_samples", "dask_client # Testing Phase total_free_cores = 16 # Production # 
worker_status = run_dask_tools.get_dask_worker_status(curr_client)", "+= rDock # # run_docking_queue += run_docking_FlexAid[curr_FlexAid:curr_FlexAid + FlexAid] # # curr_FlexAid +=", "self.sim_folder_run = self.state_data['simRunFolder'] # .split('/')[-1] self.directories = self.state_data['directory'] self.folder_exists = self.state_data['folderCreated'] self.absolute_json_state_file =", "exclude these groups, uncomment the next line #EXCHET # Include water molecules in", "processed ligand with residue number 9999 and chain A # Translational DOF of", "= path def set_ledock_path(self, path): print('LeDock path is set to ', path) self.ledock_path", "uploading to \", worker_address) test = 1 # TODO # This part runs", "clarification self.prep_mdtraj_object() # original data before transformation # Add receptor name def set_mgltools_path(self,", "final_str = '''''' # Specify the processed target file to use pdbnam =", "'.format(self.receptor_ledock_pdb) os.system(command_receptor) self.lepro_pdb_file = 'pro.pdb' # Need to check whteter lepro ran fine", "the following disclaimer. # * Redistributions in binary form must reproduce the above", "the value is 0.0 the solvent interactions are derived from the interaction matrix", "verify yolo') # # try: # self.ledock_param_title = self.ledock_data['LeDock_params']['title'] # self.rdock_title = self.ledock_data['LeDock_params']['title']", "run_docking_queue += run_docking_Vina[curr_Vina:curr_Vina + Vina] # curr_Vina += Vina # # test =", "atoms in the final results # Comment the next line if you wish", "result = run_dock_tools.run_LeDock_sim_parallel(LeDock_sample_num, data) # # test = 1 # # test =", "data :return: ''' # self.absolute_path = os.path.abspath(filename) self.load_state_called_samples = True print(os.path.abspath(__file__)) self.state_data_samples =", "need to generate LeDock commands') # self.prep_LeDock_run = True # except: # print('LeDock_params", "is finished :))))))') print('---' * 10) print('\\n') def prepare_for_dask_cluster(self, LeDock=2, rDock=2, FlexAid=2, Vina=2,", "each generation # Only considered when REPMODEL is STEADY # Integer in interval", "tpr_file, mdp_file, index_file, first_index, second_index, molname, receptor_name) >>> >>> >>> >>> LasR_MOR_mmpbsa_calc.prepare_g_mmpbsa_dask_protocol(client) >>>", "GA # The variation in degrees for the anchor angle of the ligand", "# * Redistributions in binary form must reproduce the above # copyright notice,", "e: print(\"error in find_files: \", e) sys.exit(0) def find_sample_folders(self, folder_path='.', dir_name='vina_sample'): try: dir_names", "'key_{0}_{1}'.format(key_name, curr_worker_id) print('Passed running ', run_name) # submitted_jobs_dask_temp, finished_jobs_temp = self.check_dask_jobs(submitted_jobs_dask,finished_jobs) finished_jobs, finished_jobs_dict", "if curr_index == 0 and len(submitted_jobs_dask) == 1: curr_index = 0 else: curr_index", "server # # # # # TODO this works need to create a", "out #################################################################################################################### def prepare_samples_collection_run(self, standard_exhaust=128, num_samples_run=100, run_type='samples_run'): if self.setup_box is False: print('Please setup", "self.run_folder_name, 'folderPath': self.folder_path, 'jsonStates': self.json_state_file, 'runType': self.run_type, 'molName': self.molecule_name, 'receptorName': self.receptor_name, 'simRunFolder': self.sim_folder_run,", "settings part # # self.ledock_data = self.state_data['dockSoftware']['LeDock'] 
# test = 1 # #", "mdp_file, index_file, first_index, second_index, molname='Unknown', receptor_name='Unknown', folder_path='.', job_name = 'Unknown', load_state_file=None): self.load_state_file =", "mdtraj object get mdtraj topology and save as pandas dataframe Calculate pdb receptor", "data_pre.update({'tprName':tpr_filename, 'tprMem':tpr_mem}) data_pre.update({'mdpName':mdp_filename, 'mdpMem':mdp_mem}) data_pre.update({'indexName':index_filename, 'indexMem':index_mem}) self.dask_prep = data_pre for part_num in full_g_mmpbsa_data['parts']:", "2 curr_index = 0 curr_worker = 0 # prepare worker ids for easier", "''' # self.output_receptor_rdock = Outputfile(\"mol2\", \"{0}.mol2\".format(self.receptor_name)) # self.output_receptor_rdock.write(self.receptor_pybel) # self.output_receptor_rdock.close() # # self.output_ligand_rdock", "# worker_address = worker_info['preped']['workerAddress'] # # retries_num = 2 # # # Upload", "retain the above copyright # notice, this list of conditions and the following", "it works so comment this part for a while >>> EPI_uber_dock.prepare_rdock_settings() >>> EPI_uber_dock.generate_rdock_cavity()", "self.flexaid_ga_dat_param_template = '''# Number of chromosomes (number individuals in the population) # Integer", "[1,N] where N is NUMCHROM PRINTCHR 10 ''' generate_ga_dat = 'ga_inp_' + self.receptor_name", "True def get_exhaust_run_folder_name(self): curr_folder = os.getcwd() return curr_folder + os.sep + self.run_folder_name def", "polar = 'polar_{0}.xvg'.format(i) apolar = 'apolar_{0}.xvg'.format(i) contrib_mm = 'contrib_MM_{0}.dat'.format(i) contrib_pol = 'contrib_pol_{0}.dat'.format(i) contrib_apol", "= self.trajectory_mdtraj.topology self.trajectory_mdtraj_topology_dataframe = self.trajectory_mdtraj.topology.to_dataframe() self.objects_loaded = True def get_uber_g_mmpbsa_run_folder_name(self): curr_folder = os.getcwd()", "random_seed = np.random.randint(low_seed, high_seed) command_to_run = \"{0} {1}\".format(command_receptor, parm_name) ligand_clear_dok = sample_data['ligand_clear_name'] +", "about run states self.g_mmpbsa_folder = self.get_uber_g_mmpbsa_run_folder_name() self.absolute_json_state_file = self.g_mmpbsa_folder + os.sep + self.receptor_name", "SCOOUT # Only calculate the CF for ligand atoms despite including flexible side-chains", "0 MUTARATE 0.10 # Crossover operator # Intragenic crossovers are possible INTRAGEN #", "in zip(select_indexes,select_frames): temp_state = {str(i):{}} temp_traj = self.trajectory_mdtraj[traj:traj+div_traj] temp_mdtraj.append(temp_traj) temp_mdtraj_indexes.append(i) file_save = 'traj_part{0}.xtc'.format(i)", "# Float in interval [-200.0,200.0] SLVPEN 0.0 # Use Vcontacts indexing VINDEX #", "to save submitted jobs state print('-------') if curr_index == 0 and len(submitted_jobs_dask) ==", "self.directories) test = 1 # This will hold information about run states #", "pandas dataframe as well as numpy list. 
Read more in the :ref:`User Guide", "= [] abs_file_save_list = [] simStates = {'simStates':{}} for i,traj in zip(select_indexes,select_frames): temp_state", "surfaces in contact COMPLF VCT # Do not consider intramolecular interactions NOINTR #", "self.state_data_samples = json.load(open(filename, \"r\")) # os.chdir('HSL_exhaustiveness') self.receptor_file = self.state_data_samples['receptorFile'] self.ligand_file = self.state_data_samples['ligandFile'] self.exhaustiveness", "Crossover operator # Intragenic crossovers are possible INTRAGEN # Specifies that the initial", "Side-chain rotamer acceptance threshold # Float in interval [0.0-1.0] DEECLA 0.8 # Use", "BOOM # Fraction of population to create # Only considered when REPMODEL is", "= abs_folder + os.sep + results_dask['contrib_pol_filename'] out_mem = results_dask['contrib_pol_mem'] out_file = open(out_name, 'w')", "'r') flexible_bonds_data_text = flexible_bonds_data.read() flexible_bonds_data.close() flexible_bonds_data_text_list = flexible_bonds_data_text.split('\\n') flexible_index_list_phrases = [] flexible_index_list =", "# os.chdir('HSL_exhaustiveness') self.receptor_file = self.state_data_samples['receptorFile'] self.ligand_file = self.state_data_samples['ligandFile'] self.exhaustiveness = self.state_data_samples['exhaustivenessList'] self.samples_run =", "to generate LeDock commands') # self.prep_LeDock_run = True # except: # print('LeDock_params simStates", "zip(select_indexes,select_frames): temp_state = {str(i):{}} temp_traj = self.trajectory_mdtraj[traj:traj+div_traj] temp_mdtraj.append(temp_traj) temp_mdtraj_indexes.append(i) file_save = 'traj_part{0}.xtc'.format(i) abs_file_save", "= 1 try: key = list(results.keys())[0] prog = results[key]['Program'] # need [0] key", "1 run_mmpbsa_queue = run_g_mmpbsa # run_docking_queue = run_docking_LeDock + run_docking_FlexAid + run_docking_Vina final_queue_job", "= [] self.molecule_name = molname self.ligand_name = molname self.receptor_name = receptor_name self.run_type =", "prog == 'g_mmpbsa': sample_num = results[key]['part_num'] results_dask = results[key]['dask'] original_data = self.state_data['energySoftware'][prog] abs_folder", "np.random.randint(low_seed, high_seed) command_to_run = \"{0} {1}\".format(command_receptor, parm_name) ligand_clear_dok = sample_data['ligand_clear_name'] + '.dok' #", "degrees of freedom (DOF) of the processed ligand with residue number 9999 and", "= 0.5 self.set_up_ledock_dock_blind_parameters(title=self.ledock_title, receptor_file=self.lepro_pdb_file, ledock_rmsd=self.ledock_rmsd, x_center=self.x_center, y_center=self.y_center, z_center=self.z_center) else: print('state has beeen loaded", "MMPBSA job to DASK') pop_item = queue_jobs.pop(curr_index) key_name = pop_item['save_run_name'] run_name = 'key_{0}_{1}'.format(key_name,", "PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS;", "self.run_uber_dock_protocol() or seperately >>> EPI_uber_dock.calculate_max_radius_from_com() >>> EPI_uber_dock.calculate_cube_edges() >>> EPI_uber_dock.calculate_box_edges_from_com() >>> >>> >>> EPI_uber_dock.prepare_uber_docker()", "# get_worker_free = run_dask_tools.check_free_resources(worker_status) # # # test = 1 # # total_free_cores", "molecular docking: a review # This will be for leDock # if prep_g_mmpbsa", "# out_pdbqt_filename = out_pdbqt_name # self.state_data['dockSoftware'][prog]['simStates'][str(sample_num )] = \\ # results[key] update_results =", "= list(range(0,traj_len,div_traj)) select_indexes = list(range(len(select_frames))) 
folder_to_save = self.g_mmpbsa_folder temp_mdtraj = [] temp_mdtraj_indexes =", "self.state_data['folderPath'] self.run_type = self.state_data['runType'] self.molecule_name = self.state_data['molName'] self.receptor_name = self.state_data['receptorName'] # TODO test", "from molmolpy.moldock import run_dask_tools from molmolpy.tools import run_dask_tools test = 1 curr_client =", "Use instances of side-chain conformers rather than using the Penultimate Rotamer Library #ROTOBS", "PRINTCHR 10 ''' self.generate_ga_dat_pameters = True self.generate_ga_dat = 'ga_inp_' + self.receptor_name + '-'", "index_filename = index_abs.split(os.sep)[-1] # data_pre = self.state_data['energySoftware']['g_mmpbsa'] # data_pre.update({'dask': {}}) data_pre = {}", "= self.state_data_samples['boxSettings']['numModes'] def hold_nSec(self, n): for i in range(1, n + 1): print(i)", "better using scatter for big files for upload G_MMPBSA files # test =", "update_results = copy.deepcopy(results) update_results[key].pop('dask', None) # self.state_data['dockSoftware'][prog]['simStates'][str(sample_num )] = results[key] # self.state_data['energySoftware'][prog]['simStates'][str(sample_num)] =", "files' traj_len = len(self.trajectory_mdtraj) import math # Free core approach div_traj = math.ceil(traj_len/total_free_cores)", "is disabled # To include water molecules, uncomment the next line #INCHOH #", "# # Try to load initial LeDock try: self.mdtraj_frames = self.state_data['energySoftware']['g_mmpbsa']['frames'] self.mdtraj_parts =", "interval [0.0,1.0] from fully permeable to no permeability ROTPER 0.8 # Solvent term", "curr_folder) return curr_folder + os.sep + self.run_folder_name_samples def save_state_data_json(self, filedata=None, filename=None): ''' :param", "= results_dask['contrib_apol_mem'] out_file = open(out_name, 'w') out_file.write(out_mem) out_file.close() out_name = abs_folder + os.sep", "def set_mgltools_path(self, path): print('MGLTools path is set to ', path) self.mgltools_utilities = path", "flexaid_config_input_template = '''# Optimization method (genetic-algorithms) METOPT GA # The variation in degrees", "following conditions are # met: # * Redistributions of source code must retain", "2. 
pybel Stores molecule information in pandas dataframe as well as numpy list.", "# workers=[worker_address], # key='key_scatter_{0}'.format(worker_address), # retries=retries_num) # tasks_upload.append(task) # print(\"Starting uploading to \",", "self.trajectory_file = self.state_data['trajectoryFile'] self.mdp_file = self.state_data['mdpFile'] self.tpr_file = self.state_data['tprFile'] self.index_file = self.state_data['indexFile'] self.folder_path", "elif 'vina_sample_' in i: # VIP.append(i) return VIP except Exception as e: print(\"error", "final_str += optimz2 for y in flexible_index_list_phrases: final_str += y final_str += '\\n'", "# @brief G_MMPBSA DASK PROJECT # @author <NAME> # # <!-------------------------------------------------------------------------- # #", "self.z_center, self.x_size, self.y_size, self.z_size, self.samples_exhaust, self.num_modes, self.save_run_name, self.save_run_name) print(command_to_run) self.command_samples_run_list.append(command_to_run) print(\"Launching new Sim\")", "# print(\"Starting uploading to \", worker_address) test = 1 # TODO # This", "Exception as e: print(\"error in runSim: \", e) sys.exit(0) @hlp.timeit def check_dask_jobs(self, submitted_jobs_dask,", "key sample_num = results[key]['part_num'] if prog == 'g_mmpbsa': sample_num = results[key]['part_num'] results_dask =", "OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY,", "next line #EXCHET # Include water molecules in the target (always removed by", "prep_g_mmpbsa=True): ''' prepare dask tasks for g_mmpbsa :return: ''' self.prepare_g_mmpbsa() test = 1", "= [] # Need to select those that are not finished for pre_job", "Float in interval [0.5,3.0] CLRMSD 2.0 # Number of results/docking poses to output", "seed #./ ledock_linux_x86 dock. in command_receptor = self.ledock_path + os.sep + 'ledock_linux_x86' sample_data", "# print('TADA ', self.directories) test = 1 # This will hold information about", "secs have pass' % (n)) @hlp.timeit def prepVinaSampleCommand(self, sample_num): # try: if self.setup_box", "True self.load_state_data_json(self.load_state_file) def prepare_ledock_settings(self): ''' Prepare ultraDock folder and initial json configuration >>>", "return sorted(dir_names) except Exception as e: print(\"Problem with finding folders : \", e)", "self.receptor_name = self.state_data['receptorName'] # TODO test self.sim_folder_run = self.state_data['simRunFolder'] # .split('/')[-1] self.directories =", "reproduce the above # copyright notice, this list of conditions and the following", "interval [0.0-1.0] DEECLA 0.8 # Use instances of side-chain conformers rather than using", "self.run_type, 'molName': self.molecule_name, 'receptorName': self.receptor_name, 'simRunFolder': self.sim_folder_run, 'RunFolder': self.g_mmpbsa_folder, 'absoluteJsonStates': self.absolute_json_state_file, 'directory': self.directories,", ">>> # This is for Autodock vina >>> EPI_uber_dock.set_up_Vina_Box() >>> EPI_uber_dock.prepare_Vina_run() >>> EPI_uber_dock.prepVinaSim_uberDock()", "<!-------------------------------------------------------------------------- # # Copyright (c) 2016-2019,<NAME>. # All rights reserved. 
# Redistribution and", "= results[key]['dask'] # else: # self.state_data['dockSoftware'][prog]['simStates'][str(sample_num)] = results[key] # if filename is None", "randomly POPINIMT RANDOM # Fitness function # Value in [LINEAR,PSHARE] FITMODEL PSHARE #", "self.prep_sample_run = True def get_exhaust_run_folder_name(self): curr_folder = os.getcwd() return curr_folder + os.sep +", "\" \\ \"--center_z {4} \" \\ \"--size_x {5} \" \\ \"--size_y {6} \"", "is NUMCHROM PRINTCHR 10 ''' generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' +", "1365-1373 VCTPLA R # Use normalized surfaces in contacts NORMAR # Define the", "the anchor angle of the ligand # Float in interval [1.0-30.0] VARANG 5.0", "This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d)", "self.prepVinaSim_exhaust() self.save_state_data_json() self.load_state_called = False else: self.load_state_file = self.json_state_file self.load_state_called = True self.load_state_data_json(self.load_state_file)", "is NUMCHROM STEADNUM 950 # Number of TOP individuals to print in console", ">>> EPI_uber_dock.prepare_uber_docker() >>> >>> >>> #This is for rDock, and it works so", "key='key_test', # # retries=retries_num) # # # TODO This part needs further refinement", "# # test = 1 def prepare_g_mmpbsa(self): ''' Prepare g_mmpbsa run folder and", "self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples + '.json' # This", "sample_data['ledock_parm_name'] test = 1 self.save_run_name = \"ledock_{0}_sample_{1}\".format(self.run_type, sample_num) random_seed = np.random.randint(low_seed, high_seed) command_to_run", "for i in range(1, n + 1): print(i) time.sleep(1) # Delay for 1", "THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,", "= tpr_abs.split(os.sep)[-1] # mdp_abs= full_g_mmpbsa_data['mdpFile'] mdp_file = open(mdp_abs, 'r') mdp_mem = mdp_file.read() mdp_filename", "INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED", "# # # self.state_data_samples = self.state_data.copy() # # self.save_state_data_json() # TODO this part", "elif filedata is not None: # filedata = filedata # filename = self.absolute_json_state_file", "GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION)", "client worker_status = run_dask_tools.get_dask_worker_status(curr_client) get_worker_free = run_dask_tools.check_free_resources(worker_status) import copy original_get_worker_free = copy.deepcopy(get_worker_free) #", "filename=self.absolute_json_state_file) # allow CPU to cool down # self.hold_nSec(5) print('This success ---> ',", "mdp_abs.split(os.sep)[-1] index_abs= full_g_mmpbsa_data['indexFile'] index_file = open(index_abs, 'r') index_mem = index_file.read() index_filename = index_abs.split(os.sep)[-1]", "is to run on dask server # # # # # TODO this", "self.find_sample_folders(self.folder_path, dir_name=self.run_type) # self.directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.rdock_folder_name) # print('TADA ', self.directories) test =", "seed self.save_run_name = 'vina_' + self.run_type_samples + '_' + str(sample_num) command_to_run = \"vina", "{'save_run_name': self.save_run_name, 'commandRun': command_to_run, 'commandToClean':command_to_clean, 'dokFileName':ligand_clear_dok, 'runFinished': False}} self.LeDock_sim_states.update(temp_dict) self.state_data['dockSoftware']['LeDock']['simStates'].update(temp_dict) # try: #", "NUMCHROM 500 # Number of generations # Integer in 
interval [1-N] NUMGENER 500", "any # contributors may be used to endorse or promote products derived #", "'contrib_apol_{0}.dat'.format(i) temp_state[str(i)].update({'energyMM':energy_mm, 'polar':polar, 'apolar':apolar, 'contrib_MM':contrib_mm, 'contrib_pol':contrib_pol, 'contrib_apol':contrib_apol}) temp_traj.save(abs_file_save) temp_state[str(i)].update({'fileSaved': True }) simStates['simStates'].update(temp_state) self.mdtraj_frames", "True self.state_data_samples.update({'receptorFile': self.receptor_file, 'ligandFile': self.ligand_file, 'exhaustivenessList': self.exhaustiveness, 'samples_exhaust': self.samples_exhaust, 'samplesList': self.samples_run, 'folderPath': self.folder_path,", "# All rights reserved. # Redistribution and use in source and binary forms,", "path def set_flexaid_path(self, path): print('FlexAid path is set to ', path) self.flexaid_path =", "self.absolute_json_state_file = self.state_data['absoluteJsonStates'] self.g_mmpbsa_folder = self.state_data['RunFolder'] self.json_state_file = self.state_data['jsonStates'] test = 1 #", "n): for i in range(1, n + 1): print(i) time.sleep(1) # Delay for", "be precomputed using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``. References ---------- \"\"\" def __init__(self, traj,", "curr_worker_id) print('Cur run ', run_name) if curr_index == 0: curr_index = 0 else:", "Prepare outputs import copy self.before_dask = copy.deepcopy(self.state_data) ################################################################################ if self.g_mmpbsa_prepared is True: full_g_mmpbsa_data", "'runType': self.run_type, 'molName': self.molecule_name, 'receptorName': self.receptor_name, 'simRunFolder': self.sim_folder_run, 'RunFolder': self.g_mmpbsa_folder, 'absoluteJsonStates': self.absolute_json_state_file, 'directory':", "= self.sim_folder_run + os.sep + self.receptor_name + '_' + self.molecule_name + '.json' if", "def prep_LeDock_dock_run_commands(self, num_samples=10): ''' Prepare rdock run commands and save to json :param", "= 1 curr_client = dask_client # Testing Phase total_free_cores = 16 # Production", "saving of information or not? 
def load_state_data_json(self, filename): ''' :param filename: load json", "== 'g_mmpbsa': if workstation_freemem > gmmbpsa_min_mem and jobs_running <max_jobus: print('Submit MMPBSA job to", "# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.", "+ LeDock] # curr_LeDock += LeDock # # test = 1 # run_docking_queue", "# Only output scored atoms in the final results # Comment the next", "= self.g_mmpbsa_folder temp_mdtraj = [] temp_mdtraj_indexes = [] file_save_list = [] abs_file_save_list =", "= 1 self.receptor_ledock_pdb = \"{0}.pdb\".format(self.receptor_name) self.ligand_ledock_mol2 = \"{0}.mol2\".format(self.ligand_name) self.absolute_receptor_ledock_pdb = self.ledock_absolute_folder_name + os.sep", "whteter lepro ran fine print('Updated receptor with LePro\\n') os.chdir(self.uber_dock_folder) self.state_data['dockSoftware']['LeDock'].update( {'receptor_pdb': self.receptor_ledock_pdb, 'ligand_mol2':", "= math.trunc(traj_len/total_free_cores) select_frames = list(range(0,traj_len,div_traj)) select_indexes = list(range(len(select_frames))) folder_to_save = self.g_mmpbsa_folder temp_mdtraj =", "'.json' if filename is None and filedata is None: # filename = self.json_state_file", "tpr_mem = tpr_file.read() tpr_filename = tpr_abs.split(os.sep)[-1] # mdp_abs= full_g_mmpbsa_data['mdpFile'] mdp_file = open(mdp_abs, 'r')", "# workers=[work_address], # # key='key_test', # # retries=retries_num) # # # TODO This", "# # retries_num = 2 # # task = client.submit(run_dask_tools.run_vina_using_dask, # # data,", "'EPI.pdb' >>> molname = 'EPI' >>> receptor_name = 'LasR' >>> >>> >>> EPI_uber_dock", "load_samples_state_data_json(self, filename): ''' :param filename: load json state data :return: ''' # self.absolute_path", "if dir_name in dirname: # # print(dir_name) dir_names.append(dirname) # print sorted(dir_names) return sorted(dir_names)", "rDock run command :param sample_num: :param pose_gen: default generate 20 poses :return: '''", "== 'finished': test = 1 # pop_item = modified_submitted_jobs_dask.pop(i) try: if finished_jobs_dict[i] is", "try: key = list(results.keys())[0] prog = results[key]['Program'] # need [0] key sample_num =", "= get_worker_free[worker] # worker_address = worker_info['preped']['workerAddress'] # # retries_num = 2 # #", "as pandas dataframe Calculate pdb receptor center of mass :return: ''' self.trajectory_mdtraj =", "= EPI_folder + os.sep + 'EPI.pdb' >>> molname = 'EPI' >>> receptor_name =", "workstation_info_temp['preped'] workstation_address = workstation_preped_temp['workerAddress'] # This way folder is buggy workstation_dir = original_get_worker_free[curr_worker_id]['preped']['workerDir']", "abs_file_save_list = [] simStates = {'simStates':{}} for i,traj in zip(select_indexes,select_frames): temp_state = {str(i):{}}", "self.ledock_data['LeDock_params']['zmin'] # self.ledock_zmax = self.ledock_data['LeDock_params']['zmax'] # # except: # print('LeDock_params is empty verify" ]
[ "cost(num_of_cities, distance_matrix, tour) for tour in population] sum_fitness = sum(fitness) return [f /", "if child2[i] == -1: child2[i] = parent1[i] return [child1, child2] def mutate(num_of_cities, child):", "/ cost(tour) \"\"\" fitness = [1 / cost(num_of_cities, distance_matrix, tour) for tour in", "random exchange any two cities \"\"\" return random_neighbour(num_of_cities, child) def _genetic_algorithm(num_of_cities, distance_matrix, population_size,", "// 2): children = crossover_func(num_of_cities, selected[i], selected[i + population_size // 2]) offsprings.extend(children) #", "selection fitness = calculate_fitness(population, num_of_cities, distance_matrix) selected = random.choices(population, fitness, k=population_size) random.shuffle(selected) #", "calculate_fitness(population, num_of_cities, distance_matrix) population = [tour for _, tour in sorted(zip(fitness, population), reverse=True)]", "calculate_fitness(population, num_of_cities, distance_matrix): \"\"\" Return a fitness list for the population Fitness is", "cycle crossover operator \"\"\" child1 = [-1] * num_of_cities child2 = child1.copy() i", "len(population) != population_size: population.add(tuple(random_tour(num_of_cities))) return [list(tour) for tour in population] def calculate_fitness(population, num_of_cities,", "= parent2.index(parent1[i]) for i in range(num_of_cities): if child1[i] == -1: child1[i] = parent2[i]", "population] def calculate_fitness(population, num_of_cities, distance_matrix): \"\"\" Return a fitness list for the population", "Fitness is just 1 / cost(tour) \"\"\" fitness = [1 / cost(num_of_cities, distance_matrix,", "also experiment with different terminating condition for _ in range(num_of_epochs): # selection fitness", "for f in fitness] def order_crossover(num_of_cities, parent1, parent2): \"\"\" Implements order crossover operator", "return [child1, child2] def mutate(num_of_cities, child): \"\"\" Given a child will will give", "= 0 while child1[i] == -1: child1[i] = parent1[i] i = parent1.index(parent2[i]) i", "in parent1: if city not in child2: child2.append(city) for city in parent2: if", "= parent1.index(parent2[i]) i = 0 while child2[i] == -1: child2[i] = parent2[i] i", "== -1: child1[i] = parent2[i] if child2[i] == -1: child2[i] = parent1[i] return", "= [-1] * num_of_cities child2 = child1.copy() i = 0 while child1[i] ==", "offsprings[index]) # replacement population.extend(offsprings) fitness = calculate_fitness(population, num_of_cities, distance_matrix) population = [tour for", "num_of_cities child2 = child1.copy() i = 0 while child1[i] == -1: child1[i] =", "Implements cycle crossover operator \"\"\" child1 = [-1] * num_of_cities child2 = child1.copy()", "child2[i] == -1: child2[i] = parent1[i] return [child1, child2] def mutate(num_of_cities, child): \"\"\"", "parent2[i] i = parent2.index(parent1[i]) for i in range(num_of_cities): if child1[i] == -1: child1[i]", "parent1, parent2): \"\"\" Implements order crossover operator \"\"\" start = random.randint(0, num_of_cities -", "of that tour \"\"\" crossover_func = order_crossover if crossover == 'cycle': crossover_func =", "= parent2[start:end] for city in parent1: if city not in child2: child2.append(city) for", "parent1.index(parent2[i]) i = 0 while child2[i] == -1: child2[i] = parent2[i] i =", "for _ in range(num_of_epochs): # selection fitness = calculate_fitness(population, num_of_cities, distance_matrix) selected =", "in range(num_of_epochs): # selection fitness = calculate_fitness(population, num_of_cities, 
distance_matrix) selected = random.choices(population, fitness,", "offsprings.extend(children) # mutation for index in range(population_size): if random.uniform(0, 1) < mutation_prob: offsprings[index]", "fitness list for the population Fitness is just 1 / cost(tour) \"\"\" fitness", "TSP Returns the best tour found and cost of that tour \"\"\" crossover_func", "city in parent1: if city not in child2: child2.append(city) for city in parent2:", "[child1, child2] def cycle_crossover(num_of_cities, parent1, parent2): \"\"\" Implements cycle crossover operator \"\"\" child1", "child2: child2.append(city) for city in parent2: if city not in child1: child1.append(city) return", "not in child1: child1.append(city) return [child1, child2] def cycle_crossover(num_of_cities, parent1, parent2): \"\"\" Implements", "offsprings[index] = mutate(num_of_cities, offsprings[index]) # replacement population.extend(offsprings) fitness = calculate_fitness(population, num_of_cities, distance_matrix) population", "# selection fitness = calculate_fitness(population, num_of_cities, distance_matrix) selected = random.choices(population, fitness, k=population_size) random.shuffle(selected)", "num_of_cities * 2 # In my experience a good value for `num_of_epochs` is", "tour \"\"\" crossover_func = order_crossover if crossover == 'cycle': crossover_func = cycle_crossover population", "offsprings = [] for i in range(population_size // 2): children = crossover_func(num_of_cities, selected[i],", "to `num_of_cities`. # You can also experiment with different terminating condition for _", "for city in parent2: if city not in child1: child1.append(city) return [child1, child2]", "for city in parent1: if city not in child2: child2.append(city) for city in", "fitness = calculate_fitness(population, num_of_cities, distance_matrix) selected = random.choices(population, fitness, k=population_size) random.shuffle(selected) # offsprings", "parent2: if city not in child1: child1.append(city) return [child1, child2] def cycle_crossover(num_of_cities, parent1,", "\"\"\" child1 = [-1] * num_of_cities child2 = child1.copy() i = 0 while", "random.uniform(0, 1) < mutation_prob: offsprings[index] = mutate(num_of_cities, offsprings[index]) # replacement population.extend(offsprings) fitness =", "mutate(num_of_cities, offsprings[index]) # replacement population.extend(offsprings) fitness = calculate_fitness(population, num_of_cities, distance_matrix) population = [tour", "is just 1 / cost(tour) \"\"\" fitness = [1 / cost(num_of_cities, distance_matrix, tour)", "def cycle_crossover(num_of_cities, parent1, parent2): \"\"\" Implements cycle crossover operator \"\"\" child1 = [-1]", "a good value for `num_of_epochs` is directly # proportional to `num_of_cities`. 
# You", "= parent1[start:end] child2 = parent2[start:end] for city in parent1: if city not in", "def mutate(num_of_cities, child): \"\"\" Given a child will will give a mutation Mutation", "population = [tour for _, tour in sorted(zip(fitness, population), reverse=True)] population = population[:population_size]", "- 1) child1 = parent1[start:end] child2 = parent2[start:end] for city in parent1: if", "just 1 / cost(tour) \"\"\" fitness = [1 / cost(num_of_cities, distance_matrix, tour) for", "0 while child1[i] == -1: child1[i] = parent1[i] i = parent1.index(parent2[i]) i =", "in range(num_of_cities): if child1[i] == -1: child1[i] = parent2[i] if child2[i] == -1:", "\"\"\" Implements order crossover operator \"\"\" start = random.randint(0, num_of_cities - 2) end", "i = 0 while child1[i] == -1: child1[i] = parent1[i] i = parent1.index(parent2[i])", "/ sum_fitness for f in fitness] def order_crossover(num_of_cities, parent1, parent2): \"\"\" Implements order", "mutation for index in range(population_size): if random.uniform(0, 1) < mutation_prob: offsprings[index] = mutate(num_of_cities,", "child2[i] = parent1[i] return [child1, child2] def mutate(num_of_cities, child): \"\"\" Given a child", "child) def _genetic_algorithm(num_of_cities, distance_matrix, population_size, mutation_prob, crossover): \"\"\" Implements the genetic algorithm for", "in population] def calculate_fitness(population, num_of_cities, distance_matrix): \"\"\" Return a fitness list for the", "replacement population.extend(offsprings) fitness = calculate_fitness(population, num_of_cities, distance_matrix) population = [tour for _, tour", "child2 = parent2[start:end] for city in parent1: if city not in child2: child2.append(city)", "= parent1[i] return [child1, child2] def mutate(num_of_cities, child): \"\"\" Given a child will", "num_of_epochs = num_of_cities * 2 # In my experience a good value for", "range(population_size // 2): children = crossover_func(num_of_cities, selected[i], selected[i + population_size // 2]) offsprings.extend(children)", "population_size // 2]) offsprings.extend(children) # mutation for index in range(population_size): if random.uniform(0, 1)", "= random.choices(population, fitness, k=population_size) random.shuffle(selected) # offsprings offsprings = [] for i in", "experiment with different terminating condition for _ in range(num_of_epochs): # selection fitness =", "< mutation_prob: offsprings[index] = mutate(num_of_cities, offsprings[index]) # replacement population.extend(offsprings) fitness = calculate_fitness(population, num_of_cities,", "init_population(population_size, num_of_cities): \"\"\" Initializes the population \"\"\" population = set() while len(population) !=", "parent1, parent2): \"\"\" Implements cycle crossover operator \"\"\" child1 = [-1] * num_of_cities", "= random.randint(start, num_of_cities - 1) child1 = parent1[start:end] child2 = parent2[start:end] for city", "# proportional to `num_of_cities`. # You can also experiment with different terminating condition", "child1: child1.append(city) return [child1, child2] def cycle_crossover(num_of_cities, parent1, parent2): \"\"\" Implements cycle crossover", "while child1[i] == -1: child1[i] = parent1[i] i = parent1.index(parent2[i]) i = 0", "directly # proportional to `num_of_cities`. 
# You can also experiment with different terminating", "population = init_population(population_size, num_of_cities) num_of_epochs = num_of_cities * 2 # In my experience", "my experience a good value for `num_of_epochs` is directly # proportional to `num_of_cities`.", "a fitness list for the population Fitness is just 1 / cost(tour) \"\"\"", "will will give a mutation Mutation is just random exchange any two cities", "crossover == 'cycle': crossover_func = cycle_crossover population = init_population(population_size, num_of_cities) num_of_epochs = num_of_cities", "exchange any two cities \"\"\" return random_neighbour(num_of_cities, child) def _genetic_algorithm(num_of_cities, distance_matrix, population_size, mutation_prob,", "-1: child1[i] = parent2[i] if child2[i] == -1: child2[i] = parent1[i] return [child1,", "= parent1[i] i = parent1.index(parent2[i]) i = 0 while child2[i] == -1: child2[i]", "give a mutation Mutation is just random exchange any two cities \"\"\" return", "for `num_of_epochs` is directly # proportional to `num_of_cities`. # You can also experiment", "genetic algorithm for TSP Returns the best tour found and cost of that", "[] for i in range(population_size // 2): children = crossover_func(num_of_cities, selected[i], selected[i +", "!= population_size: population.add(tuple(random_tour(num_of_cities))) return [list(tour) for tour in population] def calculate_fitness(population, num_of_cities, distance_matrix):", "mutation Mutation is just random exchange any two cities \"\"\" return random_neighbour(num_of_cities, child)", "- 2) end = random.randint(start, num_of_cities - 1) child1 = parent1[start:end] child2 =", "for i in range(population_size // 2): children = crossover_func(num_of_cities, selected[i], selected[i + population_size", "\"\"\" Given a child will will give a mutation Mutation is just random", "= mutate(num_of_cities, offsprings[index]) # replacement population.extend(offsprings) fitness = calculate_fitness(population, num_of_cities, distance_matrix) population =", "# offsprings offsprings = [] for i in range(population_size // 2): children =", "\"\"\" start = random.randint(0, num_of_cities - 2) end = random.randint(start, num_of_cities - 1)", "2): children = crossover_func(num_of_cities, selected[i], selected[i + population_size // 2]) offsprings.extend(children) # mutation", "\"\"\" fitness = [1 / cost(num_of_cities, distance_matrix, tour) for tour in population] sum_fitness", "== -1: child1[i] = parent1[i] i = parent1.index(parent2[i]) i = 0 while child2[i]", "def init_population(population_size, num_of_cities): \"\"\" Initializes the population \"\"\" population = set() while len(population)", "parent1[i] return [child1, child2] def mutate(num_of_cities, child): \"\"\" Given a child will will", "city not in child1: child1.append(city) return [child1, child2] def cycle_crossover(num_of_cities, parent1, parent2): \"\"\"", "_ in range(num_of_epochs): # selection fitness = calculate_fitness(population, num_of_cities, distance_matrix) selected = random.choices(population,", "distance_matrix) selected = random.choices(population, fitness, k=population_size) random.shuffle(selected) # offsprings offsprings = [] for", "tour in population] sum_fitness = sum(fitness) return [f / sum_fitness for f in", "and cost of that tour \"\"\" crossover_func = order_crossover if crossover == 'cycle':", "for the population Fitness is just 1 / cost(tour) \"\"\" fitness = [1", "distance_matrix, population_size, mutation_prob, crossover): \"\"\" Implements the genetic algorithm 
for TSP Returns the", "child will will give a mutation Mutation is just random exchange any two", "_genetic_algorithm(num_of_cities, distance_matrix, population_size, mutation_prob, crossover): \"\"\" Implements the genetic algorithm for TSP Returns", "from randomized_tsp.utils import cost, random_neighbour, random_tour def init_population(population_size, num_of_cities): \"\"\" Initializes the population", "index in range(population_size): if random.uniform(0, 1) < mutation_prob: offsprings[index] = mutate(num_of_cities, offsprings[index]) #", "f in fitness] def order_crossover(num_of_cities, parent1, parent2): \"\"\" Implements order crossover operator \"\"\"", "for _, tour in sorted(zip(fitness, population), reverse=True)] population = population[:population_size] return population[0], cost(num_of_cities,", "# In my experience a good value for `num_of_epochs` is directly # proportional", "cost(tour) \"\"\" fitness = [1 / cost(num_of_cities, distance_matrix, tour) for tour in population]", "i = parent2.index(parent1[i]) for i in range(num_of_cities): if child1[i] == -1: child1[i] =", "// 2]) offsprings.extend(children) # mutation for index in range(population_size): if random.uniform(0, 1) <", "= cycle_crossover population = init_population(population_size, num_of_cities) num_of_epochs = num_of_cities * 2 # In", "in population] sum_fitness = sum(fitness) return [f / sum_fitness for f in fitness]", "algorithm for TSP Returns the best tour found and cost of that tour", "== -1: child2[i] = parent1[i] return [child1, child2] def mutate(num_of_cities, child): \"\"\" Given", "crossover operator \"\"\" child1 = [-1] * num_of_cities child2 = child1.copy() i =", "child2[i] == -1: child2[i] = parent2[i] i = parent2.index(parent1[i]) for i in range(num_of_cities):", "'cycle': crossover_func = cycle_crossover population = init_population(population_size, num_of_cities) num_of_epochs = num_of_cities * 2", "best tour found and cost of that tour \"\"\" crossover_func = order_crossover if", "offsprings offsprings = [] for i in range(population_size // 2): children = crossover_func(num_of_cities,", "in child2: child2.append(city) for city in parent2: if city not in child1: child1.append(city)", "== -1: child2[i] = parent2[i] i = parent2.index(parent1[i]) for i in range(num_of_cities): if", "\"\"\" Implements the genetic algorithm for TSP Returns the best tour found and", "child): \"\"\" Given a child will will give a mutation Mutation is just", "selected[i], selected[i + population_size // 2]) offsprings.extend(children) # mutation for index in range(population_size):", "child1.copy() i = 0 while child1[i] == -1: child1[i] = parent1[i] i =", "2]) offsprings.extend(children) # mutation for index in range(population_size): if random.uniform(0, 1) < mutation_prob:", "mutate(num_of_cities, child): \"\"\" Given a child will will give a mutation Mutation is", "2 # In my experience a good value for `num_of_epochs` is directly #", "cities \"\"\" return random_neighbour(num_of_cities, child) def _genetic_algorithm(num_of_cities, distance_matrix, population_size, mutation_prob, crossover): \"\"\" Implements", "= parent2[i] i = parent2.index(parent1[i]) for i in range(num_of_cities): if child1[i] == -1:", "\"\"\" Initializes the population \"\"\" population = set() while len(population) != population_size: population.add(tuple(random_tour(num_of_cities)))", "population.add(tuple(random_tour(num_of_cities))) return [list(tour) for tour in population] def calculate_fitness(population, num_of_cities, distance_matrix): 
\"\"\" Return", "/ cost(num_of_cities, distance_matrix, tour) for tour in population] sum_fitness = sum(fitness) return [f", "i = parent1.index(parent2[i]) i = 0 while child2[i] == -1: child2[i] = parent2[i]", "selected[i + population_size // 2]) offsprings.extend(children) # mutation for index in range(population_size): if", "a child will will give a mutation Mutation is just random exchange any", "crossover_func(num_of_cities, selected[i], selected[i + population_size // 2]) offsprings.extend(children) # mutation for index in", "= num_of_cities * 2 # In my experience a good value for `num_of_epochs`", "cycle_crossover(num_of_cities, parent1, parent2): \"\"\" Implements cycle crossover operator \"\"\" child1 = [-1] *", "tour in sorted(zip(fitness, population), reverse=True)] population = population[:population_size] return population[0], cost(num_of_cities, distance_matrix, population[0])", "-1: child2[i] = parent2[i] i = parent2.index(parent1[i]) for i in range(num_of_cities): if child1[i]", "with different terminating condition for _ in range(num_of_epochs): # selection fitness = calculate_fitness(population,", "fitness = [1 / cost(num_of_cities, distance_matrix, tour) for tour in population] sum_fitness =", "for index in range(population_size): if random.uniform(0, 1) < mutation_prob: offsprings[index] = mutate(num_of_cities, offsprings[index])", "import cost, random_neighbour, random_tour def init_population(population_size, num_of_cities): \"\"\" Initializes the population \"\"\" population", "random.randint(start, num_of_cities - 1) child1 = parent1[start:end] child2 = parent2[start:end] for city in", "good value for `num_of_epochs` is directly # proportional to `num_of_cities`. # You can", "child1 = [-1] * num_of_cities child2 = child1.copy() i = 0 while child1[i]", "def order_crossover(num_of_cities, parent1, parent2): \"\"\" Implements order crossover operator \"\"\" start = random.randint(0,", "Initializes the population \"\"\" population = set() while len(population) != population_size: population.add(tuple(random_tour(num_of_cities))) return", "[child1, child2] def mutate(num_of_cities, child): \"\"\" Given a child will will give a", "proportional to `num_of_cities`. 
# You can also experiment with different terminating condition for", "-1: child1[i] = parent1[i] i = parent1.index(parent2[i]) i = 0 while child2[i] ==", "will give a mutation Mutation is just random exchange any two cities \"\"\"", "city not in child2: child2.append(city) for city in parent2: if city not in", "children = crossover_func(num_of_cities, selected[i], selected[i + population_size // 2]) offsprings.extend(children) # mutation for", "\"\"\" Return a fitness list for the population Fitness is just 1 /", "[f / sum_fitness for f in fitness] def order_crossover(num_of_cities, parent1, parent2): \"\"\" Implements", "= 0 while child2[i] == -1: child2[i] = parent2[i] i = parent2.index(parent1[i]) for", "= calculate_fitness(population, num_of_cities, distance_matrix) population = [tour for _, tour in sorted(zip(fitness, population),", "= [1 / cost(num_of_cities, distance_matrix, tour) for tour in population] sum_fitness = sum(fitness)", "return [list(tour) for tour in population] def calculate_fitness(population, num_of_cities, distance_matrix): \"\"\" Return a", "crossover): \"\"\" Implements the genetic algorithm for TSP Returns the best tour found", "1 / cost(tour) \"\"\" fitness = [1 / cost(num_of_cities, distance_matrix, tour) for tour", "[list(tour) for tour in population] def calculate_fitness(population, num_of_cities, distance_matrix): \"\"\" Return a fitness", "parent1[i] i = parent1.index(parent2[i]) i = 0 while child2[i] == -1: child2[i] =", "the genetic algorithm for TSP Returns the best tour found and cost of", "population] sum_fitness = sum(fitness) return [f / sum_fitness for f in fitness] def", "== 'cycle': crossover_func = cycle_crossover population = init_population(population_size, num_of_cities) num_of_epochs = num_of_cities *", "return random_neighbour(num_of_cities, child) def _genetic_algorithm(num_of_cities, distance_matrix, population_size, mutation_prob, crossover): \"\"\" Implements the genetic", "num_of_cities): \"\"\" Initializes the population \"\"\" population = set() while len(population) != population_size:", "order crossover operator \"\"\" start = random.randint(0, num_of_cities - 2) end = random.randint(start,", "any two cities \"\"\" return random_neighbour(num_of_cities, child) def _genetic_algorithm(num_of_cities, distance_matrix, population_size, mutation_prob, crossover):", "operator \"\"\" child1 = [-1] * num_of_cities child2 = child1.copy() i = 0", "range(num_of_epochs): # selection fitness = calculate_fitness(population, num_of_cities, distance_matrix) selected = random.choices(population, fitness, k=population_size)", "operator \"\"\" start = random.randint(0, num_of_cities - 2) end = random.randint(start, num_of_cities -", "sum_fitness = sum(fitness) return [f / sum_fitness for f in fitness] def order_crossover(num_of_cities,", "random.randint(0, num_of_cities - 2) end = random.randint(start, num_of_cities - 1) child1 = parent1[start:end]", "is just random exchange any two cities \"\"\" return random_neighbour(num_of_cities, child) def _genetic_algorithm(num_of_cities,", "\"\"\" crossover_func = order_crossover if crossover == 'cycle': crossover_func = cycle_crossover population =", "= [] for i in range(population_size // 2): children = crossover_func(num_of_cities, selected[i], selected[i", "population.extend(offsprings) fitness = calculate_fitness(population, num_of_cities, distance_matrix) population = [tour for _, tour in", "random_neighbour(num_of_cities, child) def _genetic_algorithm(num_of_cities, distance_matrix, population_size, 
mutation_prob, crossover): \"\"\" Implements the genetic algorithm", "population Fitness is just 1 / cost(tour) \"\"\" fitness = [1 / cost(num_of_cities,", "in range(population_size // 2): children = crossover_func(num_of_cities, selected[i], selected[i + population_size // 2])", "# replacement population.extend(offsprings) fitness = calculate_fitness(population, num_of_cities, distance_matrix) population = [tour for _,", "num_of_cities, distance_matrix) population = [tour for _, tour in sorted(zip(fitness, population), reverse=True)] population", "order_crossover(num_of_cities, parent1, parent2): \"\"\" Implements order crossover operator \"\"\" start = random.randint(0, num_of_cities", "num_of_cities - 1) child1 = parent1[start:end] child2 = parent2[start:end] for city in parent1:", "child2] def mutate(num_of_cities, child): \"\"\" Given a child will will give a mutation", "Mutation is just random exchange any two cities \"\"\" return random_neighbour(num_of_cities, child) def", "\"\"\" Implements cycle crossover operator \"\"\" child1 = [-1] * num_of_cities child2 =", "parent1[start:end] child2 = parent2[start:end] for city in parent1: if city not in child2:", "parent2.index(parent1[i]) for i in range(num_of_cities): if child1[i] == -1: child1[i] = parent2[i] if", "tour found and cost of that tour \"\"\" crossover_func = order_crossover if crossover", "range(population_size): if random.uniform(0, 1) < mutation_prob: offsprings[index] = mutate(num_of_cities, offsprings[index]) # replacement population.extend(offsprings)", "cycle_crossover population = init_population(population_size, num_of_cities) num_of_epochs = num_of_cities * 2 # In my", "for TSP Returns the best tour found and cost of that tour \"\"\"", "end = random.randint(start, num_of_cities - 1) child1 = parent1[start:end] child2 = parent2[start:end] for", "= random.randint(0, num_of_cities - 2) end = random.randint(start, num_of_cities - 1) child1 =", "`num_of_epochs` is directly # proportional to `num_of_cities`. 
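
# --- sanity check (a sketch, not part of the original module) ---
# Both crossover operators above must return children that are valid
# permutations of the cities; this quick check only uses the functions
# defined above.
if __name__ == '__main__':
    _p1, _p2 = [0, 1, 2, 3, 4], [3, 0, 4, 1, 2]
    for _op in (order_crossover, cycle_crossover):
        for _child in _op(5, list(_p1), list(_p2)):
            assert sorted(_child) == [0, 1, 2, 3, 4], (_op.__name__, _child)
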

def _genetic_algorithm(num_of_cities, distance_matrix, population_size,
                       mutation_prob, crossover):
    """
    Implements the genetic algorithm for TSP.
    Returns the best tour found and the cost of that tour.
    """
    crossover_func = order_crossover
    if crossover == 'cycle':
        crossover_func = cycle_crossover

    population = init_population(population_size, num_of_cities)
    num_of_epochs = num_of_cities * 2
    # In my experience a good value for `num_of_epochs` is directly
    # proportional to `num_of_cities`.
    # You can also experiment with different terminating conditions.
    for _ in range(num_of_epochs):
        # selection
        fitness = calculate_fitness(population, num_of_cities, distance_matrix)
        selected = random.choices(population, fitness, k=population_size)
        random.shuffle(selected)

        # offsprings
        offsprings = []
        for i in range(population_size // 2):
            children = crossover_func(num_of_cities, selected[i],
                                      selected[i + population_size // 2])
            offsprings.extend(children)

        # mutation
        for index in range(population_size):
            if random.uniform(0, 1) < mutation_prob:
                offsprings[index] = mutate(num_of_cities, offsprings[index])

        # replacement
        population.extend(offsprings)
        fitness = calculate_fitness(population, num_of_cities, distance_matrix)
        population = [tour for _, tour in sorted(zip(fitness, population),
                                                 reverse=True)]
        population = population[:population_size]

    return population[0], cost(num_of_cities, distance_matrix, population[0])
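
# --- usage sketch (not part of the original module) ---
# A minimal end-to-end run, assuming `randomized_tsp.utils` is importable so
# that `cost`, `random_tour` and `random_neighbour` (imported at the top of
# this module) behave as the docstrings above describe. The distance matrix
# here is random but symmetric.
if __name__ == '__main__':
    n = 8
    matrix = [[0] * n for _ in range(n)]
    for a in range(n):
        for b in range(a + 1, n):
            matrix[a][b] = matrix[b][a] = random.randint(1, 100)
    tour, tour_cost = _genetic_algorithm(n, matrix, population_size=20,
                                         mutation_prob=0.1, crossover='order')
    print('best tour:', tour, 'with cost', tour_cost)
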
<reponame>liyu13264/PyQt5_Practice
import sys

from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QPushButton


class QDialogDemo(QMainWindow):
    def __init__(self):
        super(QDialogDemo, self).__init__()
        self.button = QPushButton(self)
        self.init()

    def init(self):
        self.setWindowTitle('QDialog')
        self.resize(300, 300)
        self.button.setText('CLICK')
        self.button.clicked.connect(self.function)

    @staticmethod
    def function():
        dialog = QDialog()
        button = QPushButton('QUIT', dialog)  # equivalent to button.setText('QUIT')
        button.move(50, 50)
        button.clicked.connect(dialog.close)
        dialog.setWindowTitle('...')
        dialog.setWindowModality(Qt.ApplicationModal)  # must be set before exec_()
        dialog.exec_()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    main = QDialogDemo()
    main.show()
    sys.exit(app.exec_())
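
# Modeless variant (a sketch, not from the original repo): dialog.exec_()
# above spins its own event loop and blocks until the dialog closes; with
# Qt.ApplicationModal that freezes every other window. Calling show() instead
# returns immediately, so the dialog must stay referenced (here on `self`) to
# avoid being garbage-collected.
import sys

from PyQt5.QtWidgets import QApplication, QDialog, QMainWindow, QPushButton


class ModelessDemo(QMainWindow):
    def __init__(self):
        super().__init__()
        button = QPushButton('OPEN', self)
        button.clicked.connect(self.open_dialog)

    def open_dialog(self):
        self.dialog = QDialog(self)  # keep a reference on self
        self.dialog.setWindowTitle('modeless')
        self.dialog.show()  # non-blocking; the main window stays usable


if __name__ == '__main__':
    app = QApplication(sys.argv)
    main = ModelessDemo()
    main.show()
    sys.exit(app.exec_())
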
import json
import random
import time

import requests
import unidecode
from flask import Flask
from flask_ask import Ask, question, session, statement

APP = Flask(__name__)
ASK = Ask(APP, "/british_problems")


def get_british_problems():
    """Get the titles of the /r/britishproblems posts"""
    user_pass_dict = {'user': 'alexabritishproblems',
                      'passwd': '<PASSWORD>&',
                      'api_type': 'json'}
    sess = requests.Session()
    sess.headers.update(
        {'User-Agent': 'alexa:british_problems:0.1 ' + '(by /u/alexabritishproblems)'})
    sess.post('https://www.reddit.com/api/login', data=user_pass_dict)
    time.sleep(1)
    url = "https://reddit.com/r/britishproblems/.json?limit=10"
    html = sess.get(url)
    data = json.loads(html.content.decode('utf-8'))
    titles = [unidecode.unidecode(listing['data']['title'])
              for listing in data['data']['children']]
    return titles


titles = get_british_problems()
print(titles)


@APP.route('/')
def homepage():
    """Flask default routing"""
    return 'greetz fren'


@ASK.launch
def start_skill():
    """Entry point for the alexa skill"""
    welcome_message = 'Hello there, would you like to hear a very British problem?'
    return question(welcome_message)


@ASK.intent("GetNewBritishProblem")
def handle_get_problem_intent():
    """Handles the intent for getting a new british problem and outputting it to Alexa"""
    # pick a single title; statement() expects one speech string, not a list
    british_problem = random.choice(get_british_problems())
    return statement(british_problem)


@ASK.intent("NoIntent")
def handle_no_intent():
    """Handles an unmatched intent"""
    goodbye_message = 'See you later... bye.'
    return statement(goodbye_message)


if __name__ == '__main__':
    APP.run(debug=True)
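
# For reference (assumed shape, based on Reddit's public listing JSON): each
# item in data['data']['children'] is a "thing" whose payload sits under its
# own 'data' key, which is why titles are read as listing['data']['title'].
example_listing = {
    'kind': 'Listing',
    'data': {
        'children': [
            {'kind': 't3',
             'data': {'title': 'Apologising when someone else bumps into you'}},
        ],
    },
}
assert [c['data']['title'] for c in example_listing['data']['children']] == \
    ['Apologising when someone else bumps into you']
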
from django.db import models
from django_extensions.db.fields import CreationDateTimeField
from django_extensions.db.models import TimeStampedModel


class CreationSortedTimeStampedModel(TimeStampedModel):
    class Meta(TimeStampedModel.Meta):
        abstract = True
        ordering = ['-created']
        get_latest_by = 'created'

    created = CreationDateTimeField(db_index=True)


class CopyrightLicense(models.TextChoices):
    CC_0 = 'CC-0', 'CC-0'

    # These 2 require attribution
    CC_BY = 'CC-BY', 'CC-BY'
    CC_BY_NC = 'CC-BY-NC', 'CC-BY-NC'
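A minimal sketch of how these two pieces are typically combined; the `Photo` model and its `license` field are hypothetical, not part of the original module:

from django.db import models


class Photo(CreationSortedTimeStampedModel):  # hypothetical concrete model
    # stores one CopyrightLicense value; max_length=8 fits 'CC-BY-NC'
    license = models.CharField(max_length=8, choices=CopyrightLicense.choices)


# Photo.objects.latest() resolves via get_latest_by = 'created', and default
# querysets come back newest-first because of ordering = ['-created'].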
[ "t = (self.mean - other.mean)/np.sqrt(s1 + s2) # calculate the probability using the", "def _rvs(self): return np.zeros(self._size) DeterministicDistribution = DeterministicDistribution_gen( name='DeterministicDistribution' ) class LogUniformDistribution_gen(stats.rv_continuous): \"\"\" Log-uniform", "- linalg.expm(x.flat[i]*Theta)[0, :].sum() elif x == 0: res = 0 else: if self.method", "np.sum(self._terms * factor) return res # ============================================================================== # OLD DISTRIBUTIONS THAT MIGHT NOT", "return loguniform_mean(mean, np.sqrt(width)) def random_log_uniform(v_min, v_max, size): \"\"\" returns random variables that a", "ValueError('Variance must be positive') elif var == 0: # treat special case separately", "other.count - 1 dof = (s1 + s2)**2/(s1**2/nu1 + s2**2/nu2) # calculate the", "np.sum(self._terms[..., :] * factor, axis=1) elif x == 0: res = 0 else:", "np.sqrt(1 - np.exp(-dist_b)) else: raise ValueError('Unknown distance `%s`' % kind) return dist def", "add_observation(self, value): \"\"\" add an observed value and adjust mean and variance of", "* (q + 1) * np.log(q) / (q - 1) - 1 -", ":] res[nz] = np.sum(self._terms[..., :] * factor, axis=1) else: Theta = (np.diag(-self.rates, 0)", "+ (self.mean - self.mean)**2)/other.var - 1) elif kind == 'bhattacharyya': var_ratio = self.var/other.var", "the sum of `count` log-normally distributed random variables with mean 1 and variance", "+ variance/mean2)) if definition == 'scipy': return mu, sigma elif definition == 'numpy':", "np.zeros_like(x) idx = (1 < x*s) & (x < s) res[idx] = 1/(x[idx]", "= np.prod(mat, 1) def rvs(self, size): \"\"\" random variates \"\"\" # choose the", "frac] = 0 return res def _pdf(self, x, s, frac): \"\"\" probability density", "res[nz] = np.sum(self._terms * factor) else: Theta = np.diag(-self.rates, 0) + np.diag(self.rates[:-1], 1)", "`%s`' % definition) def lognorm_mean(mean, sigma): \"\"\" returns a lognormal distribution parameterized by", "== 1: # treat special case separately return DeterministicDistribution(mean) else: scale = mean", "special, linalg, optimize from ..data_structures.cache import cached_property def lognorm_mean_var_to_mu_sigma(mean, variance, definition='scipy'): \"\"\" determines", "..data_structures.cache import cached_property def lognorm_mean_var_to_mu_sigma(mean, variance, definition='scipy'): \"\"\" determines the parameters of the", "directly return lognorm_mean_var(count * mean, count * variance) elif method == 'leastsq': #", "prob def overlap(self, other, common_variance=True): \"\"\" estimates the amount of overlap between two", "= np.zeros_like(x) idx = (1 < x*s) & (x < s) log_s =", "res = 1 - np.sum(self._terms * factor) return res # ============================================================================== # OLD", "mean) return NormalDistribution(mean, M2/(count - 1), count) def distance(self, other, kind='kullback-leibler'): \"\"\" return", "nu2 = self.count - 1, other.count - 1 dof = (s1 + s2)**2/(s1**2/nu1", "the distribution `dist` \"\"\" mean = dist.mean() var = dist.var() return (dist.moment(3) -", "percent point function (inverse of cdf) \"\"\" s = s[0] # reset broadcasting", "(q - (1 - frac)) / frac res = np.zeros_like(q) idx = (q_scale", "# calculate the probability using the Student's T distribution prob = stats.t.sf(np.abs(t), dof)", "for alpha in self.alpha) def mean(self): \"\"\" mean of the distribution \"\"\" return", "x == 0: res = 0 else: if self.method == 'sum': factor =", "= s**(2*q[idx] - 1) return res 
def _stats(self, s): \"\"\" calculates statistics of", "match the first two moments of the distribution `leastsq` - minimize the error", "1.) def _stats(self): return 0., 0., 0., 0. def _rvs(self): return np.zeros(self._size) DeterministicDistribution", "= (self.mean - other.mean)/np.sqrt(s1 + s2) # calculate the probability using the Student's", "sum of `count` log-normally distributed random variables with mean 1 and variance `var_norm`.", "s, self._size) def _pdf(self, x, s): \"\"\" probability density function \"\"\" s =", "!= len(self.alpha): raise ValueError('The current implementation only supports cases ' 'where all rates", "np.log(s*s)) return res def _cdf(self, x, s, frac): \"\"\" cumulative probability function \"\"\"", "\"\"\" # choose the receptor response characteristics return random_log_uniform(1/s, s, self._size) def _pdf(self,", "1) return res PartialLogUniformDistribution = PartialLogUniformDistribution_gen( a=0, name='PartialLogUniformDistribution' ) NORMAL_DISTRIBUTION_NORMALIZATION = 1/np.sqrt(2*np.pi) class", "adjust mean and variance of the distribution. This returns a new distribution and", "else: mean = self.mean[mask] var = self.var[mask] std = self.std[mask] return NORMAL_DISTRIBUTION_NORMALIZATION/std \\", "= variance / mean return stats.gamma(scale=beta, a=alpha) def loguniform_mean(mean, width): \"\"\" returns a", "receptors randomly if frac != 1: res[np.random.random(self._size) > frac] = 0 return res", "np.asarray(rates) self.alpha = 1 / self.rates if np.any(rates <= 0): raise ValueError('All rates", "np.zeros_like(x) nz = (x > 0) if np.any(nz): if self.method == 'sum': factor", "res = np.zeros_like(q) idx = (q > 0) res[idx] = s**(2*q[idx] - 1)", "distribution. This returns a new distribution and only works if count was set", "and a spread parameter `width`. 
The ratio between the maximal value and the", "ValueError('Unknown distance `%s`' % kind) return dist def welch_test(self, other): \"\"\" performs Welch's", "s, frac): \"\"\" probability density function \"\"\" s, frac = s[0], frac[0] #", "dist.var() return (dist.moment(3) - 3*mean*var - mean**3) / var**(3/2) class DeterministicDistribution_gen(stats.rv_continuous): \"\"\" deterministic", "moments of the distribution `leastsq` - minimize the error in the interval \"\"\"", "s[0] # reset broadcasting res = np.zeros_like(q) idx = (q > 0) res[idx]", "density=True) def pdf_diff(params): \"\"\" evaluate the estimated pdf \"\"\" scale, sigma = params", "\"\"\" return the distance between two normal distributions \"\"\" if kind == 'kullback-leibler':", "= count sum_var = count * var_norm # get random numbers dist =", "sum_var, 'scipy') params, _ = optimize.leastsq(pdf_diff, params_init) return params def lognorm_sum(count, mean, variance,", "None: mean = self.mean var = self.var std = self.std else: mean =", "the maximal value and the minimal value is given by width**2 \"\"\" if", "density # do the least square fitting params_init = lognorm_mean_var_to_mu_sigma(sum_mean, sum_var, 'scipy') params,", "0: res = 0 else: if self.method == 'sum': factor = np.exp(-x*self.rates)/self.ratesx res[nz]", "def pdf(self, x): \"\"\" probability density function \"\"\" if not np.isscalar(x): x =", "res def _ppf(self, q, s): \"\"\" percent point function (inverse of cdf) \"\"\"", "alpha = mean**2 / variance beta = variance / mean return stats.gamma(scale=beta, a=alpha)", "* var_norm # get random numbers dist = lognorm_mean_var(1, var_norm) vals = dist.rvs((int(sim_terms),", "used to choose a definition of the resulting parameters that is suitable for", "- 1, other.count - 1 dof = (s1 + s2)**2/(s1**2/nu1 + s2**2/nu2) #", "of the log-normal distribution such that the distribution yields a given mean and", "self._size) def _pdf(self, x, s): \"\"\" probability density function \"\"\" s = s[0]", "= DeterministicDistribution_gen( name='DeterministicDistribution' ) class LogUniformDistribution_gen(stats.rv_continuous): \"\"\" Log-uniform distribution. \"\"\" def freeze(self, *args,", "rates of the underlying exponential processes `method` determines what method is used for", "np.where(x < 0, 0., 1.) def _stats(self): return 0., 0., 0., 0. def", "by its mean and variance. Here, we need to solve a non-linear equation", "s**(2*q[idx] - 1) return res def _stats(self, s): \"\"\" calculates statistics of the", "parameters of the log-normal distribution such that the distribution yields a given mean", "the given parameters. 
Here, several methods can be used: `fenton` - match the", "/ (4 * s**2 * np.log(s)**2) return mean, var, None, None LogUniformDistribution =", "M2 = M2 + delta*(value - mean) return NormalDistribution(mean, M2/(count - 1), count)", "= var self.count = count def copy(self): return self.__class__(self.mean, self.var, self.count) @cached_property() def", "mean2/np.sqrt(mean2 + variance) sigma = np.sqrt(np.log(1 + variance/mean2)) if definition == 'scipy': return", ":].sum() elif x == 0: res = 0 else: if self.method == 'sum':", "_cdf(self, x, s, frac): \"\"\" cumulative probability function \"\"\" s, frac = s[0],", "* mean, s=sigma) else: raise ValueError('Unknown method `%s` for determining the sum of", "None: return self.copy() else: M2 = self.var*(self.count - 1) count = self.count +", "* (2*scale*np.log(scale)) / (scale**2 - 1) return (width / scale, width * scale)", "histogram obtained by drawing `sim_terms` random numbers \"\"\" sum_mean = count sum_var =", "- self.mean mean = self.mean + delta/count M2 = M2 + delta*(value -", "= np.zeros_like(x) nz = (x > 0) if np.any(nz): factor = np.exp(-x[nz, None]*self.rates[...,", "response characteristics return random_log_uniform(1/s, s, self._size) def _pdf(self, x, s): \"\"\" probability density", "elif var == 0: # treat special case separately return DeterministicDistribution(mean) else: #", "np.zeros_like(x) idx = (1 < x*s) & (x < s) res[idx] = frac/(x[idx]", "raise ValueError('Unknown method `%s` for determining the sum of ' 'lognormal distributions. Accepted", "The optional parameter `definition` can be used to choose a definition of the", "else: raise ValueError('Unknown distance `%s`' % kind) return dist def welch_test(self, other): \"\"\"", "None] \\ / (self.alpha[:, None] - self.alpha[None, :]) mat[(self.alpha[:, None] - self.alpha[None, :])", "items response characteristics res = np.exp(s * np.random.standard_normal(self._size)) if frac != 1: #", "from __future__ import division import numpy as np from scipy import stats, special,", "= self.alpha[:, None] \\ / (self.alpha[:, None] - self.alpha[None, :]) mat[(self.alpha[:, None] -", "are determined by fitting the probability density function to a histogram obtained by", "= np.log(s) res[idx] = (log_s + np.log(x[idx]))/(2 * log_s) res[x > s] =", "s): \"\"\" probability density function \"\"\" s = s[0] # reset broadcasting res", "return DeterministicDistribution(mean) else: scale, sigma = lognorm_mean_var_to_mu_sigma(mean, variance, 'scipy') return stats.lognorm(scale=scale, s=sigma) def", "\"\"\" if var < 0: raise ValueError('Variance must be positive') elif var ==", "(2 * np.sum(self.alpha**2 * self._terms) - (self.alpha.sum())**2) def pdf(self, x): \"\"\" probability density", "0.5*special.erf(np.log(x)/(s*np.sqrt(2)))) def _ppf(self, q, s, frac): \"\"\" percent point function (inverse of cdf)", "return stats.lognorm(scale=scale, s=sigma) def lognorm_sum_leastsq(count, var_norm, sim_terms=1e5, bins=64): \"\"\" returns the parameters of", "params_init) return params def lognorm_sum(count, mean, variance, method='fenton'): \"\"\" returns an estimate of", "mean, count * variance) elif method == 'leastsq': # determine the moments from", "a lognormal distribution parameterized by its mean and its variance. \"\"\" if variance", "\"\"\" s, frac = s[0], frac[0] # reset broadcasting return frac / (s*x*np.sqrt(2*np.pi))", "and `variance`. 
The returned distribution is again log-normal with mean and variance determined", "probability function \"\"\" s, frac = s[0], frac[0] # reset broadcasting res =", "= (x > 0) if np.any(nz): factor = np.exp(-x[nz, None]*self.rates[..., :]) res =", "cdf) \"\"\" s, frac = s[0], frac[0] # reset broadcasting q_scale = (q", "distribution parameterized by its mean and variance. Here, we need to solve a", "idx = (1 < x*s) & (x < s) log_s = np.log(s) res[idx]", "count = self.count + 1 delta = value - self.mean mean = self.mean", "term1 = np.log(0.25*(var_ratio + 1/var_ratio + 2)) term2 = (self.mean - other.mean)**2/(self.var +", "mean and a spread parameter `width`. The ratio between the maximal value and", "receptor response characteristics return sum(np.random.exponential(scale=alpha, size=size) for alpha in self.alpha) def mean(self): \"\"\"", "class NormalDistribution(object): \"\"\" class representing normal distributions \"\"\" def __init__(self, mean, var, count=None):", "np.diag(self.rates[:-1], 1)) for i in np.flatnonzero(nz): res.flat[i] = \\ 1 - linalg.expm(x.flat[i]*Theta)[0, :].sum()", "not np.isscalar(x): x = np.asarray(x) res = np.zeros_like(x) nz = (x > 0)", "freedom s1, s2 = self.var/self.count, other.var/other.count nu1, nu2 = self.count - 1, other.count", "of the distribution `leastsq` - minimize the error in the interval \"\"\" if", "(self.alpha[:, None] - self.alpha[None, :]) mat[(self.alpha[:, None] - self.alpha[None, :]) == 0] =", "linalg, optimize from ..data_structures.cache import cached_property def lognorm_mean_var_to_mu_sigma(mean, variance, definition='scipy'): \"\"\" determines the", "mean return stats.gamma(scale=beta, a=alpha) def loguniform_mean(mean, width): \"\"\" returns a loguniform distribution parameterized", "distribution parameterized by its mean and a spread parameter `width`. The ratio between", "res[idx] = 1/(x[idx] * np.log(s*s)) return res def _cdf(self, x, s): \"\"\" cumulative", "None]*self.rates[..., :]) res = 1 - np.sum(self._terms[..., :] * factor, axis=1) elif x", "return np.where(x < 0, 0., 1.) def _stats(self): return 0., 0., 0., 0.", "distributed variables with `mean` and `variance`. 
The returned distribution is again log-normal with", "\"\"\" probability density function \"\"\" s, frac = s[0], frac[0] # reset broadcasting", "frac*res def _ppf(self, q, s, frac): \"\"\" percent point function (inverse of cdf)", "**kwds): frozen = super(LogUniformDistribution_gen, self).freeze(*args, **kwds) frozen.support = self.support(*args, **kwds) return frozen def", "size): \"\"\" returns random variables that a distributed uniformly in log space \"\"\"", "S = np.sqrt(0.5*(self.var + other.var)) else: # other is sampled S = self.std", "s=width) def loguniform_mean_var(mean, var): \"\"\" returns a loguniform distribution parameterized by its mean", "0) res[idx] = np.exp(s * special.ndtri(q_scale[idx])) return res PartialLogNormDistribution = PartialLogNormDistribution_gen( a=0, name='PartialLogNormDistribution'", "width): \"\"\" returns a loguniform distribution parameterized by its mean and a spread", "- frac` is zero Similar to the lognorm distribution, this does not support", "s**2 * np.log(s)**2) return mean, var, None, None LogUniformDistribution = LogUniformDistribution_gen( a=0, name='LogUniformDistribution'", "response characteristics res = np.exp(s * np.random.standard_normal(self._size)) if frac != 1: # switch", "s[0], frac[0] # reset broadcasting return frac / (s*x*np.sqrt(2*np.pi)) * np.exp(-1/2*(np.log(x)/s)**2) def _cdf(self,", "variables with `mean` and `variance`. The returned distribution is again log-normal with mean", "= mean2/np.sqrt(mean2 + variance) sigma = np.sqrt(np.log(1 + variance/mean2)) if definition == 'scipy':", "methods can be used: `fenton` - match the first two moments of the", "build upon the scipy.stats package and extend it. ''' from __future__ import division", "or `eigen` \"\"\" if method in {'sum', 'eigen'}: self.method = method # prepare", "\"\"\" Hypoexponential distribution. Unfortunately, the framework supplied by scipy.stats.rv_continuous does not support a", "positive') if len(np.unique(self.alpha)) != len(self.alpha): raise ValueError('The current implementation only supports cases '", "stats.lognorm(scale=mu, s=sigma) def lognorm_mean_var(mean, variance): \"\"\" returns a lognormal distribution parameterized by its", "return stats.lognorm(scale=scale * mean, s=sigma) else: raise ValueError('Unknown method `%s` for determining the", "self.count) @cached_property() def std(self): \"\"\" return standard deviation \"\"\" return np.sqrt(self.var) def pdf(self,", "ValueError('All rates must be positive') if len(np.unique(self.alpha)) != len(self.alpha): raise ValueError('The current implementation", "return DeterministicDistribution(mean) else: mu = mean * np.exp(-0.5 * sigma**2) return stats.lognorm(scale=mu, s=sigma)", "= 0 else: factor = np.exp(-x*self.rates) res = 1 - np.sum(self._terms * factor)", "\"\"\" match the coefficient of variation \"\"\" return 0.5 * (q + 1)", "estimates the amount of overlap between two distributions \"\"\" if common_variance: if self.count", "raise ValueError('Variance must be positive') elif var == 0: # treat special case", "probability distributions, which build upon the scipy.stats package and extend it. 
''' from", "return standard deviation \"\"\" return np.sqrt(self.var) def pdf(self, value, mask=None): \"\"\" return probability", "- 1)*self.var + (other.count - 1)*other.var) S = np.sqrt(expr/(self.count + other.count - 2))", "'scipy': return mu, sigma elif definition == 'numpy': return np.log(mu), sigma else: raise", "x): \"\"\" probability density function \"\"\" if not np.isscalar(x): x = np.asarray(x) res", "the framework supplied by scipy.stats.rv_continuous does not support a variable number of parameters", "is sampled S = other.std else: # both are sampled expr = ((self.count", "1 - np.sum(self._terms * factor) return res # ============================================================================== # OLD DISTRIBUTIONS THAT", "= stats.t.sf(np.abs(t), dof) * 2 return prob def overlap(self, other, common_variance=True): \"\"\" estimates", "many observations were used to estimate the parameters. All values can also be", "is zero \"\"\" def _rvs(self, s, frac): \"\"\" random variates \"\"\" # choose", "variance, method='fenton'): \"\"\" returns an estimate of the distribution of the sum of", "idx = (q_scale > 0) res[idx] = np.exp(s * special.ndtri(q_scale[idx])) return res PartialLogNormDistribution", "s2)**2/(s1**2/nu1 + s2**2/nu2) # calculate the Welch t-value t = (self.mean - other.mean)/np.sqrt(s1", "sum of ' 'lognormal distributions. Accepted methods are ' '[`fenton`, `leastsq`].') def gamma_mean_var(mean,", "from https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html#making-a-continuous-distribution-i-e-subclassing-rv-continuous \"\"\" def _cdf(self, x): return np.where(x < 0, 0., 1.) def", "`sim_terms` random numbers \"\"\" sum_mean = count sum_var = count * var_norm #", "NormalDistribution(mean, M2/(count - 1), count) def distance(self, other, kind='kullback-leibler'): \"\"\" return the distance", "count=None): \"\"\" normal distributions are described by their mean and variance. Additionally, count", "method='sum'): \"\"\" initializes the hypoexponential distribution. `rates` are the rates of the underlying", "= self.count - 1, other.count - 1 dof = (s1 + s2)**2/(s1**2/nu1 +", "set \"\"\" if self.count is None: return self.copy() else: M2 = self.var*(self.count -", "(np.diag(-self.rates, 0) + np.diag(self.rates[:-1], 1)) for i in np.flatnonzero(nz): res.flat[i] = \\ 1", "+ 2)) term2 = (self.mean - other.mean)**2/(self.var + other.var) dist = 0.25*(term1 +", "def rvs(self, size): \"\"\" random variates \"\"\" # choose the receptor response characteristics", "the distribution \"\"\" return self.alpha.sum() def variance(self): \"\"\" variance of the distribution \"\"\"", "self.var = var self.count = count def copy(self): return self.__class__(self.mean, self.var, self.count) @cached_property()", "np.errstate(divide='ignore'): mat = self.alpha[:, None] \\ / (self.alpha[:, None] - self.alpha[None, :]) mat[(self.alpha[:,", "1) count = self.count + 1 delta = value - self.mean mean =", "\"\"\" add an observed value and adjust mean and variance of the distribution.", "std(self): \"\"\" return standard deviation \"\"\" return np.sqrt(self.var) def pdf(self, value, mask=None): \"\"\"", "interface here. \"\"\" def __init__(self, rates, method='sum'): \"\"\" initializes the hypoexponential distribution. `rates`", "parameters that is suitable for the given software package. 
\"\"\" mean2 = mean**2", "+ 1) * np.log(q) / (q - 1) - 1 - cv2 width", "is used for calculating the cdf and can be either `sum` or `eigen`", "values can also be numpy arrays to represent many distributions efficiently \"\"\" self.mean", "# get random numbers dist = lognorm_mean_var(1, var_norm) vals = dist.rvs((int(sim_terms), count)).sum(axis=1) #", "s = s[0] # reset broadcasting res = np.zeros_like(x) idx = (1 <", "`1 - frac` is zero Similar to the lognorm distribution, this does not", "\"\"\" if method == 'fenton': # use the moments directly return lognorm_mean_var(count *", "2 return prob def overlap(self, other, common_variance=True): \"\"\" estimates the amount of overlap", "# calculate the Welch t-value t = (self.mean - other.mean)/np.sqrt(s1 + s2) #", "S = self.std else: if other.count is None: # self is sampled S", "how many observations were used to estimate the parameters. All values can also", "we thus only mimic its interface here. \"\"\" def __init__(self, rates, method='sum'): \"\"\"", "other, kind='kullback-leibler'): \"\"\" return the distance between two normal distributions \"\"\" if kind", "def copy(self): return self.__class__(self.mean, self.var, self.count) @cached_property() def std(self): \"\"\" return standard deviation", "is sampled S = self.std else: if other.count is None: # self is", "log_s) res[x > s] = 1 return res def _ppf(self, q, s): \"\"\"", "response characteristics res = random_log_uniform(1/s, s, self._size) # switch off receptors randomly if", "return the distance between two normal distributions \"\"\" if kind == 'kullback-leibler': dist", "the distribution `leastsq` - minimize the error in the interval \"\"\" if method", "np.exp(s * special.ndtri(q_scale[idx])) return res PartialLogNormDistribution = PartialLogNormDistribution_gen( a=0, name='PartialLogNormDistribution' ) class PartialLogUniformDistribution_gen(stats.rv_continuous):", "mean**2 / variance beta = variance / mean return stats.gamma(scale=beta, a=alpha) def loguniform_mean(mean,", "0] = 1 self._terms = np.prod(mat, 1) def rvs(self, size): \"\"\" random variates", "= mean * (2*width*np.log(width)) / (width**2 - 1) return LogUniformDistribution(scale=scale, s=width) def loguniform_mean_var(mean,", "the cdf and can be either `sum` or `eigen` \"\"\" if method in", "get random numbers dist = lognorm_mean_var(1, var_norm) vals = dist.rvs((int(sim_terms), count)).sum(axis=1) # get", "s] = 1 return res def _ppf(self, q, s): \"\"\" percent point function", "random variates \"\"\" # choose the items response characteristics res = np.exp(s *", ") class HypoExponentialDistribution(object): \"\"\" Hypoexponential distribution. Unfortunately, the framework supplied by scipy.stats.rv_continuous does", "dist = 0.5*(np.log(other.var/self.var) + (self.var + (self.mean - self.mean)**2)/other.var - 1) elif kind", "PartialLogNormDistribution = PartialLogNormDistribution_gen( a=0, name='PartialLogNormDistribution' ) class PartialLogUniformDistribution_gen(stats.rv_continuous): \"\"\" partial log-uniform distribution. a", "'sum': factor = np.exp(-x*self.rates)/self.ratesx res[nz] = np.sum(self._terms * factor) else: Theta = np.diag(-self.rates,", "of the result \"\"\" if var < 0: raise ValueError('Variance must be positive')", "Theta = (np.diag(-self.rates, 0) + np.diag(self.rates[:-1], 1)) for i in np.flatnonzero(nz): res.flat[i] =", "described by their mean and variance. 
Additionally, count denotes how many observations were", "= 1 return (1 - frac) + frac*res def _ppf(self, q, s, frac):", "0 else: factor = np.exp(-x*self.rates) res = 1 - np.sum(self._terms * factor) return", "Welch's t-test of two normal distributions \"\"\" # calculate the degrees of freedom", "- other.mean)/S return 2*stats.norm.cdf(-0.5*delta) else: # here, we would have to integrate numerically", "(s*x*np.sqrt(2*np.pi)) * np.exp(-1/2*(np.log(x)/s)**2) def _cdf(self, x, s, frac): \"\"\" cumulative probability function \"\"\"", "res = np.zeros_like(x) nz = (x > 0) if np.any(nz): if self.method ==", "params return stats.lognorm.pdf(xs, scale=scale, s=sigma) - density # do the least square fitting", "\"\"\" return the interval in which the PDF of the distribution is non-zero", "we need to solve a non-linear equation numerically, which might degrade accuracy and", "np.linspace(0, val_max, bins + 1) xs = 0.5*(bins[:-1] + bins[1:]) density, _ =", "of ' 'lognormal distributions. Accepted methods are ' '[`fenton`, `leastsq`].') def gamma_mean_var(mean, variance):", "mean self.var = var self.count = count def copy(self): return self.__class__(self.mean, self.var, self.count)", "\"\"\" def __init__(self, mean, var, count=None): \"\"\" normal distributions are described by their", "/ mean**2 # match square coefficient of variation def _rhs(q): \"\"\" match the", "(dist.moment(3) - 3*mean*var - mean**3) / var**(3/2) class DeterministicDistribution_gen(stats.rv_continuous): \"\"\" deterministic distribution that", "mask=None): \"\"\" return probability density function at value \"\"\" if mask is None:", "1 and variance `var_norm`. These parameters are determined by fitting the probability density", "= dist.rvs((int(sim_terms), count)).sum(axis=1) # get the histogram val_max = sum_mean + 3 *", "0., 0. 
def _rvs(self): return np.zeros(self._size) DeterministicDistribution = DeterministicDistribution_gen( name='DeterministicDistribution' ) class LogUniformDistribution_gen(stats.rv_continuous):", "dist.rvs((int(sim_terms), count)).sum(axis=1) # get the histogram val_max = sum_mean + 3 * np.sqrt(sum_var)", "is None: mean = self.mean var = self.var std = self.std else: mean", "# neither is sampled S = np.sqrt(0.5*(self.var + other.var)) else: # other is", "the first two moments of the distribution `leastsq` - minimize the error in", "self.var*(self.count - 1) count = self.count + 1 delta = value - self.mean", "copied from https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html#making-a-continuous-distribution-i-e-subclassing-rv-continuous \"\"\" def _cdf(self, x): return np.where(x < 0, 0., 1.)", "* np.log(s) - (s**2 - 1)**2) \\ / (4 * s**2 * np.log(s)**2)", "\"\"\" if not np.isscalar(x): x = np.asarray(x) res = np.zeros_like(x) nz = (x", "- 1) * np.log(s) - (s**2 - 1)**2) \\ / (4 * s**2", "/ var) def add_observation(self, value): \"\"\" add an observed value and adjust mean", "density function to a histogram obtained by drawing `sim_terms` random numbers \"\"\" sum_mean", "1)*other.var) S = np.sqrt(expr/(self.count + other.count - 2)) delta = np.abs(self.mean - other.mean)/S", "= self.var/other.var term1 = np.log(0.25*(var_ratio + 1/var_ratio + 2)) term2 = (self.mean -", "reset broadcasting q_scale = (q - (1 - frac)) / frac res =", "> 0) res[idx] = np.exp(s * special.ndtri(q_scale[idx])) return res PartialLogNormDistribution = PartialLogNormDistribution_gen( a=0,", "\"\"\" returns a gamma distribution with given mean and variance \"\"\" alpha =", "`leastsq` - minimize the error in the interval \"\"\" if method == 'fenton':", "= variance / mean**2 scale, sigma = lognorm_sum_leastsq(count, var_norm) return stats.lognorm(scale=scale * mean,", "returns a lognormal distribution parameterized by its mean and its variance. \"\"\" if", "\"\"\" def __init__(self, rates, method='sum'): \"\"\" initializes the hypoexponential distribution. 
`rates` are the", "amount of overlap between two distributions \"\"\" if common_variance: if self.count is None:", "mean**2 mu = mean2/np.sqrt(mean2 + variance) sigma = np.sqrt(np.log(1 + variance/mean2)) if definition", "= method # prepare the rates of the system self.rates = np.asarray(rates) self.alpha", "_rhs(q): \"\"\" match the coefficient of variation \"\"\" return 0.5 * (q +", "# match square coefficient of variation def _rhs(q): \"\"\" match the coefficient of", "* np.random.standard_normal(self._size)) if frac != 1: # switch off items randomly res[np.random.random(self._size) >", "random_log_uniform(v_min, v_max, size): \"\"\" returns random variables that a distributed uniformly in log", "`eigen` \"\"\" if method in {'sum', 'eigen'}: self.method = method # prepare the", "_ = optimize.leastsq(pdf_diff, params_init) return params def lognorm_sum(count, mean, variance, method='fenton'): \"\"\" returns", "_ppf(self, q, s, frac): \"\"\" percent point function (inverse of cdf) \"\"\" s,", "np.exp(-x*self.rates)/self.ratesx res[nz] = np.sum(self._terms * factor) else: Theta = np.diag(-self.rates, 0) + np.diag(self.rates[:-1],", "np.diag(self.rates[:-1], 1) res = 1 - linalg.expm(x*Theta)[0, :].sum() return res def cdf(self, x):", "res = np.zeros_like(x) idx = (1 < x*s) & (x < s) log_s", "are different from each other.') # calculate terms that we need later with", "var = self.var std = self.std else: mean = self.mean[mask] var = self.var[mask]", "if self.count is None: if other.count is None: # neither is sampled S", "an estimate of the distribution of the sum of `count` log-normally distributed variables", "\"\"\" self.mean = mean self.var = var self.count = count def copy(self): return", "variance == 0: return DeterministicDistribution(mean) else: scale, sigma = lognorm_mean_var_to_mu_sigma(mean, variance, 'scipy') return", "frac res = np.zeros_like(q) idx = (q_scale > 0) res[idx] = s**(2*q_scale[idx] -", "_ = np.histogram(vals, bins=bins, range=[0, val_max], density=True) def pdf_diff(params): \"\"\" evaluate the estimated", "# determine the moments from fitting var_norm = variance / mean**2 scale, sigma", "does not support any location parameter \"\"\" def _rvs(self, s, frac): \"\"\" random", "mean 1 and variance `var_norm`. 
These parameters are determined by fitting the probability", "distributions \"\"\" def __init__(self, mean, var, count=None): \"\"\" normal distributions are described by", "scale, sigma = lognorm_mean_var_to_mu_sigma(mean, variance, 'scipy') return stats.lognorm(scale=scale, s=sigma) def lognorm_sum_leastsq(count, var_norm, sim_terms=1e5,", "24, 2015 @author: <NAME> <<EMAIL>> This module provides functions and classes for probability", "as np from scipy import stats, special, linalg, optimize from ..data_structures.cache import cached_property", "+ variance) sigma = np.sqrt(np.log(1 + variance/mean2)) if definition == 'scipy': return mu,", "_pdf(self, x, s): \"\"\" probability density function \"\"\" s = s[0] # reset", "_rvs(self, s, frac): \"\"\" random variates \"\"\" # choose the receptor response characteristics", "return res PartialLogNormDistribution = PartialLogNormDistribution_gen( a=0, name='PartialLogNormDistribution' ) class PartialLogUniformDistribution_gen(stats.rv_continuous): \"\"\" partial log-uniform", "implementation only supports cases ' 'where all rates are different from each other.')", "x*s) & (x < s) res[idx] = frac/(x[idx] * np.log(s*s)) return res def", "bins=bins, range=[0, val_max], density=True) def pdf_diff(params): \"\"\" evaluate the estimated pdf \"\"\" scale,", "probability density function \"\"\" s, frac = s[0], frac[0] # reset broadcasting res", "parameterized by its mean and a spread parameter `width`. The ratio between the", "= lognorm_mean_var_to_mu_sigma(mean, variance, 'scipy') return stats.lognorm(scale=scale, s=sigma) def lognorm_sum_leastsq(count, var_norm, sim_terms=1e5, bins=64): \"\"\"", "(x > 0) if np.any(nz): if self.method == 'sum': factor = np.exp(-x[nz, None]", "count) def distance(self, other, kind='kullback-leibler'): \"\"\" return the distance between two normal distributions", "None LogUniformDistribution = LogUniformDistribution_gen( a=0, name='LogUniformDistribution' ) class HypoExponentialDistribution(object): \"\"\" Hypoexponential distribution. Unfortunately,", "ratio between the maximal value and the minimal value is given by width**2", "of `count` log-normally distributed random variables with mean 1 and variance `var_norm`. These", "res[idx] = s**(2*q[idx] - 1) return res def _stats(self, s): \"\"\" calculates statistics", "by scipy.stats.rv_continuous does not support a variable number of parameters and we thus", "observations were used to estimate the parameters. All values can also be numpy", "here. \"\"\" def __init__(self, rates, method='sum'): \"\"\" initializes the hypoexponential distribution. 
`rates` are", "= np.exp(-x*self.rates) res = 1 - np.sum(self._terms * factor) return res # ==============================================================================", "terms that we need later with np.errstate(divide='ignore'): mat = self.alpha[:, None] \\ /", "self.count is None: return self.copy() else: M2 = self.var*(self.count - 1) count =", "return self.copy() else: M2 = self.var*(self.count - 1) count = self.count + 1", "from each other.') # calculate terms that we need later with np.errstate(divide='ignore'): mat", "the error in the interval \"\"\" if method == 'fenton': # use the", "\"\"\" scale, sigma = params return stats.lognorm.pdf(xs, scale=scale, s=sigma) - density # do", "def cdf(self, x): \"\"\" cumulative density function \"\"\" if not np.isscalar(x): x =", "return NORMAL_DISTRIBUTION_NORMALIZATION/std \\ * np.exp(-0.5*(value - mean)**2 / var) def add_observation(self, value): \"\"\"", "return dist def welch_test(self, other): \"\"\" performs Welch's t-test of two normal distributions", "s, frac = s[0], frac[0] # reset broadcasting q_scale = (q - (1", "M2 = self.var*(self.count - 1) count = self.count + 1 delta = value", "at value \"\"\" if mask is None: mean = self.mean var = self.var", "\"\"\" cumulative density function \"\"\" if not np.isscalar(x): x = np.asarray(x) res =", "\"\"\" probability density function \"\"\" s = s[0] # reset broadcasting res =", "class PartialLogNormDistribution_gen(stats.rv_continuous): \"\"\" partial log-normal distribution. a fraction `frac` of the distribution follows", "(x < s) res[idx] = 1/(x[idx] * np.log(s*s)) return res def _cdf(self, x,", "with np.errstate(divide='ignore'): mat = self.alpha[:, None] \\ / (self.alpha[:, None] - self.alpha[None, :])", "variance): \"\"\" returns a lognormal distribution parameterized by its mean and its variance.", "DeterministicDistribution(mean) else: # determine width parameter numerically cv2 = var / mean**2 #", "division import numpy as np from scipy import stats, special, linalg, optimize from", "parameter `definition` can be used to choose a definition of the resulting parameters", "random numbers dist = lognorm_mean_var(1, var_norm) vals = dist.rvs((int(sim_terms), count)).sum(axis=1) # get the", "\"\"\" extra_args, _, _, _ = self._parse_args_stats(*args, **kwds) mean = self.mean(*args, **kwds) scale", "name='PartialLogNormDistribution' ) class PartialLogUniformDistribution_gen(stats.rv_continuous): \"\"\" partial log-uniform distribution. a fraction `frac` of the", "return params def lognorm_sum(count, mean, variance, method='fenton'): \"\"\" returns an estimate of the", "return stats.lognorm(scale=mu, s=sigma) def lognorm_mean_var(mean, variance): \"\"\" returns a lognormal distribution parameterized by", "> 0) if np.any(nz): factor = np.exp(-x[nz, None]*self.rates[..., :]) res = 1 -", "_stats(self): return 0., 0., 0., 0. def _rvs(self): return np.zeros(self._size) DeterministicDistribution = DeterministicDistribution_gen(", ") class LogUniformDistribution_gen(stats.rv_continuous): \"\"\" Log-uniform distribution. \"\"\" def freeze(self, *args, **kwds): frozen =", "res = np.random.uniform(log_min, log_max, size) return np.exp(res) def dist_skewness(dist): \"\"\" returns the skewness", "and variance determined from the given parameters. 
Here, several methods can be used:", "the parameters of a log-normal distribution that estimates the sum of `count` log-normally", "(self.mean - other.mean)**2/(self.var + other.var) dist = 0.25*(term1 + term2) elif kind ==", "by its mean and a spread parameter `width`. The ratio between the maximal", "were used to estimate the parameters. All values can also be numpy arrays", "= sum_mean + 3 * np.sqrt(sum_var) bins = np.linspace(0, val_max, bins + 1)", "M2/(count - 1), count) def distance(self, other, kind='kullback-leibler'): \"\"\" return the distance between", "for i in np.flatnonzero(nz): res.flat[i] = \\ 1 - linalg.expm(x.flat[i]*Theta)[0, :].sum() elif x", "val_max = sum_mean + 3 * np.sqrt(sum_var) bins = np.linspace(0, val_max, bins +", "method='fenton'): \"\"\" returns an estimate of the distribution of the sum of `count`", "result \"\"\" if var < 0: raise ValueError('Variance must be positive') elif var", "the distribution \"\"\" return (2 * np.sum(self.alpha**2 * self._terms) - (self.alpha.sum())**2) def pdf(self,", "given value Code copied from https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html#making-a-continuous-distribution-i-e-subclassing-rv-continuous \"\"\" def _cdf(self, x): return np.where(x <", "s, frac = s[0], frac[0] # reset broadcasting return 1 + frac*(-0.5 +", "determined from the given parameters. Here, several methods can be used: `fenton` -", "separately return DeterministicDistribution(mean) else: scale = mean * (2*width*np.log(width)) / (width**2 - 1)", "s2**2/nu2) # calculate the Welch t-value t = (self.mean - other.mean)/np.sqrt(s1 + s2)", "_ = self._parse_args_stats(*args, **kwds) mean = self.mean(*args, **kwds) scale = extra_args[0] width =", "\"\"\" mean2 = mean**2 mu = mean2/np.sqrt(mean2 + variance) sigma = np.sqrt(np.log(1 +", "np.exp(-x[nz, None]*self.rates[..., :]) res = 1 - np.sum(self._terms[..., :] * factor, axis=1) elif", "idx = (q > 0) res[idx] = s**(2*q[idx] - 1) return res def", "i in np.flatnonzero(nz): res.flat[i] = \\ 1 - linalg.expm(x.flat[i]*Theta)[0, :].sum() elif x ==", "does not support a variable number of parameters and we thus only mimic", "the estimated pdf \"\"\" scale, sigma = params return stats.lognorm.pdf(xs, scale=scale, s=sigma) -", "0., 0., 0. def _rvs(self): return np.zeros(self._size) DeterministicDistribution = DeterministicDistribution_gen( name='DeterministicDistribution' ) class", "was set \"\"\" if self.count is None: return self.copy() else: M2 = self.var*(self.count", "self._terms = np.prod(mat, 1) def rvs(self, size): \"\"\" random variates \"\"\" # choose", "np.flatnonzero(nz): res.flat[i] = \\ 1 - linalg.expm(x.flat[i]*Theta)[0, :].sum() elif x == 0: res", "self.mean[mask] var = self.var[mask] std = self.std[mask] return NORMAL_DISTRIBUTION_NORMALIZATION/std \\ * np.exp(-0.5*(value -", "0: raise ValueError('Variance must be positive') elif var == 0: # treat special", "var = self.var[mask] std = self.std[mask] return NORMAL_DISTRIBUTION_NORMALIZATION/std \\ * np.exp(-0.5*(value - mean)**2", "0: # treat special case separately return DeterministicDistribution(mean) else: # determine width parameter", "1) - 1 - cv2 width = optimize.newton(_rhs, 1.1) return loguniform_mean(mean, np.sqrt(width)) def", "it. 
''' from __future__ import division import numpy as np from scipy import", "function \"\"\" s, frac = s[0], frac[0] # reset broadcasting return frac /", "dist = np.sqrt(1 - np.exp(-dist_b)) else: raise ValueError('Unknown distance `%s`' % kind) return", "__init__(self, rates, method='sum'): \"\"\" initializes the hypoexponential distribution. `rates` are the rates of", "probability density function \"\"\" s = s[0] # reset broadcasting res = np.zeros_like(x)", "============================================================================== # OLD DISTRIBUTIONS THAT MIGHT NOT BE NEEDED ANYMORE # ============================================================================== class", "broadcasting res = np.zeros_like(x) idx = (1 < x*s) & (x < s)", "width = mean * (2*scale*np.log(scale)) / (scale**2 - 1) return (width / scale,", "response characteristics return sum(np.random.exponential(scale=alpha, size=size) for alpha in self.alpha) def mean(self): \"\"\" mean", "(log_s + np.log(x[idx]))/(2 * log_s) res[x > s] = 1 return (1 -", "np.zeros_like(q) idx = (q_scale > 0) res[idx] = np.exp(s * special.ndtri(q_scale[idx])) return res", "res = np.zeros_like(q) idx = (q_scale > 0) res[idx] = np.exp(s * special.ndtri(q_scale[idx]))", "* factor, axis=1) elif x == 0: res = 0 else: factor =", "= mean**2 mu = mean2/np.sqrt(mean2 + variance) sigma = np.sqrt(np.log(1 + variance/mean2)) if", "definition == 'scipy': return mu, sigma elif definition == 'numpy': return np.log(mu), sigma", "of cdf) \"\"\" s, frac = s[0], frac[0] # reset broadcasting q_scale =", "variance beta = variance / mean return stats.gamma(scale=beta, a=alpha) def loguniform_mean(mean, width): \"\"\"", "1.1) return loguniform_mean(mean, np.sqrt(width)) def random_log_uniform(v_min, v_max, size): \"\"\" returns random variables that", "bins=64): \"\"\" returns the parameters of a log-normal distribution that estimates the sum", "calculate the Welch t-value t = (self.mean - other.mean)/np.sqrt(s1 + s2) # calculate", "/ mean return stats.gamma(scale=beta, a=alpha) def loguniform_mean(mean, width): \"\"\" returns a loguniform distribution", "mean**2 # match square coefficient of variation def _rhs(q): \"\"\" match the coefficient", "= (log_s + np.log(x[idx]))/(2 * log_s) res[x > s] = 1 return res", "bins + 1) xs = 0.5*(bins[:-1] + bins[1:]) density, _ = np.histogram(vals, bins=bins,", "elif x == 0: res = 0 else: factor = np.exp(-x*self.rates) res =", "add an observed value and adjust mean and variance of the distribution. This", "are described by their mean and variance. Additionally, count denotes how many observations", "np.any(nz): factor = np.exp(-x[nz, None]*self.rates[..., :]) res = 1 - np.sum(self._terms[..., :] *", "= self.mean + delta/count M2 = M2 + delta*(value - mean) return NormalDistribution(mean,", "be positive') if len(np.unique(self.alpha)) != len(self.alpha): raise ValueError('The current implementation only supports cases", "# switch off receptors randomly if frac != 1: res[np.random.random(self._size) > frac] =", "+ np.log(x[idx]))/(2 * log_s) res[x > s] = 1 return (1 - frac)", "* 2 return prob def overlap(self, other, common_variance=True): \"\"\" estimates the amount of", "np.random.standard_normal(self._size)) if frac != 1: # switch off items randomly res[np.random.random(self._size) > frac]", "\"\"\" determines the parameters of the log-normal distribution such that the distribution yields", "variance \"\"\" alpha = mean**2 / variance beta = variance / mean return", "the scipy.stats package and extend it. 
''' from __future__ import division import numpy", "variates \"\"\" # choose the receptor response characteristics return sum(np.random.exponential(scale=alpha, size=size) for alpha", "t-test of two normal distributions \"\"\" # calculate the degrees of freedom s1,", "- linalg.expm(x*Theta)[0, :].sum() return res def cdf(self, x): \"\"\" cumulative density function \"\"\"", "probability function \"\"\" s, frac = s[0], frac[0] # reset broadcasting return 1", "variance. \"\"\" if variance == 0: return DeterministicDistribution(mean) else: scale, sigma = lognorm_mean_var_to_mu_sigma(mean,", "np.sum(self._terms[..., :] * factor, axis=1) else: Theta = (np.diag(-self.rates, 0) + np.diag(self.rates[:-1], 1))", "1: # switch off items randomly res[np.random.random(self._size) > frac] = 0 return res", "\"\"\" return self.alpha.sum() def variance(self): \"\"\" variance of the distribution \"\"\" return (2", "= s[0] # reset broadcasting res = np.zeros_like(x) idx = (1 < x*s)", "np.log(v_min), np.log(v_max) res = np.random.uniform(log_min, log_max, size) return np.exp(res) def dist_skewness(dist): \"\"\" returns", "* np.log(q) / (q - 1) - 1 - cv2 width = optimize.newton(_rhs,", "if not np.isscalar(x): x = np.asarray(x) res = np.zeros_like(x) nz = (x >", "\"\"\" mean = (s**2 - 1)/(2*s*np.log(s)) var = ((s**4 - 1) * np.log(s)", "definition == 'numpy': return np.log(mu), sigma else: raise ValueError('Unknown definition `%s`' % definition)", "return res def _ppf(self, q, s): \"\"\" percent point function (inverse of cdf)", "mean * (2*scale*np.log(scale)) / (scale**2 - 1) return (width / scale, width *", "mimic its interface here. \"\"\" def __init__(self, rates, method='sum'): \"\"\" initializes the hypoexponential", "distributions \"\"\" # calculate the degrees of freedom s1, s2 = self.var/self.count, other.var/other.count", "gamma distribution with given mean and variance \"\"\" alpha = mean**2 / variance", "var == 0: # treat special case separately return DeterministicDistribution(mean) else: # determine", "cases ' 'where all rates are different from each other.') # calculate terms", "parameters. All values can also be numpy arrays to represent many distributions efficiently", "else: factor = np.exp(-x*self.rates) res = 1 - np.sum(self._terms * factor) return res", "the interval in which the PDF of the distribution is non-zero \"\"\" extra_args,", "delta*(value - mean) return NormalDistribution(mean, M2/(count - 1), count) def distance(self, other, kind='kullback-leibler'):", "determining the sum of ' 'lognormal distributions. 
Accepted methods are ' '[`fenton`, `leastsq`].')", "S = other.std else: # both are sampled expr = ((self.count - 1)*self.var", "elif definition == 'numpy': return np.log(mu), sigma else: raise ValueError('Unknown definition `%s`' %", "a given value Code copied from https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html#making-a-continuous-distribution-i-e-subclassing-rv-continuous \"\"\" def _cdf(self, x): return np.where(x", "1)) for i in np.flatnonzero(nz): res.flat[i] = \\ 1 - linalg.expm(x.flat[i]*Theta)[0, :].sum() elif", "a non-linear equation numerically, which might degrade accuracy and performance of the result", "the system self.rates = np.asarray(rates) self.alpha = 1 / self.rates if np.any(rates <=", "self.alpha) def mean(self): \"\"\" mean of the distribution \"\"\" return self.alpha.sum() def variance(self):", "0: res = 0 else: factor = np.exp(-x*self.rates) res = 1 - np.sum(self._terms", "np.exp(-dist_b)) else: raise ValueError('Unknown distance `%s`' % kind) return dist def welch_test(self, other):", "var_norm # get random numbers dist = lognorm_mean_var(1, var_norm) vals = dist.rvs((int(sim_terms), count)).sum(axis=1)", "def lognorm_mean(mean, sigma): \"\"\" returns a lognormal distribution parameterized by its mean and", "# reset broadcasting return 1 + frac*(-0.5 + 0.5*special.erf(np.log(x)/(s*np.sqrt(2)))) def _ppf(self, q, s,", "returns the parameters of a log-normal distribution that estimates the sum of `count`", "each other.') # calculate terms that we need later with np.errstate(divide='ignore'): mat =", "random variates \"\"\" # choose the receptor response characteristics return random_log_uniform(1/s, s, self._size)", "def _cdf(self, x, s): \"\"\" cumulative probability function \"\"\" s = s[0] #", "method # prepare the rates of the system self.rates = np.asarray(rates) self.alpha =", "beta = variance / mean return stats.gamma(scale=beta, a=alpha) def loguniform_mean(mean, width): \"\"\" returns", "determine the moments from fitting var_norm = variance / mean**2 scale, sigma =", "that is suitable for the given software package. \"\"\" mean2 = mean**2 mu", "return (dist.moment(3) - 3*mean*var - mean**3) / var**(3/2) class DeterministicDistribution_gen(stats.rv_continuous): \"\"\" deterministic distribution", "== 'fenton': # use the moments directly return lognorm_mean_var(count * mean, count *", "returned distribution is again log-normal with mean and variance determined from the given", "log-normal distribution. 
a fraction `frac` of the distribution follows a log-normal distribution, while", "density, _ = np.histogram(vals, bins=bins, range=[0, val_max], density=True) def pdf_diff(params): \"\"\" evaluate the", "= np.histogram(vals, bins=bins, range=[0, val_max], density=True) def pdf_diff(params): \"\"\" evaluate the estimated pdf", "def support(self, *args, **kwds): \"\"\" return the interval in which the PDF of", "'numpy': return np.log(mu), sigma else: raise ValueError('Unknown definition `%s`' % definition) def lognorm_mean(mean,", "definition of the resulting parameters that is suitable for the given software package.", "frozen = super(LogUniformDistribution_gen, self).freeze(*args, **kwds) frozen.support = self.support(*args, **kwds) return frozen def support(self,", "**kwds) frozen.support = self.support(*args, **kwds) return frozen def support(self, *args, **kwds): \"\"\" return", "fraction `frac` of the distribution follows a log-normal distribution, while the remaining fraction", "a log-normal distribution that estimates the sum of `count` log-normally distributed random variables", "frac/(x[idx] * np.log(s*s)) return res def _cdf(self, x, s, frac): \"\"\" cumulative probability", "raise ValueError('All rates must be positive') if len(np.unique(self.alpha)) != len(self.alpha): raise ValueError('The current", "np.asarray(x) res = np.zeros_like(x) nz = (x > 0) if np.any(nz): if self.method", "= PartialLogNormDistribution_gen( a=0, name='PartialLogNormDistribution' ) class PartialLogUniformDistribution_gen(stats.rv_continuous): \"\"\" partial log-uniform distribution. a fraction", "choose the receptor response characteristics return random_log_uniform(1/s, s, self._size) def _pdf(self, x, s):", "support any location parameter \"\"\" def _rvs(self, s, frac): \"\"\" random variates \"\"\"", "= np.exp(s * special.ndtri(q_scale[idx])) return res PartialLogNormDistribution = PartialLogNormDistribution_gen( a=0, name='PartialLogNormDistribution' ) class", "2)) delta = np.abs(self.mean - other.mean)/S return 2*stats.norm.cdf(-0.5*delta) else: # here, we would", "calculate the probability using the Student's T distribution prob = stats.t.sf(np.abs(t), dof) *", "support(self, *args, **kwds): \"\"\" return the interval in which the PDF of the", "'eigen'}: self.method = method # prepare the rates of the system self.rates =", "lognorm distribution, this does not support any location parameter \"\"\" def _rvs(self, s,", "var_norm) return stats.lognorm(scale=scale * mean, s=sigma) else: raise ValueError('Unknown method `%s` for determining", "self._terms) - (self.alpha.sum())**2) def pdf(self, x): \"\"\" probability density function \"\"\" if not", "in {'sum', 'eigen'}: self.method = method # prepare the rates of the system", ":]) res = 1 - np.sum(self._terms[..., :] * factor, axis=1) elif x ==", "in the interval \"\"\" if method == 'fenton': # use the moments directly", "numerically, which might degrade accuracy and performance of the result \"\"\" if var", "scale = extra_args[0] width = mean * (2*scale*np.log(scale)) / (scale**2 - 1) return", "1) * np.log(s) - (s**2 - 1)**2) \\ / (4 * s**2 *", "square coefficient of variation def _rhs(q): \"\"\" match the coefficient of variation \"\"\"", "'bhattacharyya': var_ratio = self.var/other.var term1 = np.log(0.25*(var_ratio + 1/var_ratio + 2)) term2 =", "name='LogUniformDistribution' ) class HypoExponentialDistribution(object): \"\"\" Hypoexponential distribution. 
Unfortunately, the framework supplied by scipy.stats.rv_continuous", "= np.zeros_like(q) idx = (q > 0) res[idx] = s**(2*q[idx] - 1) return", "`count` log-normally distributed variables with `mean` and `variance`. The returned distribution is again", "var**(3/2) class DeterministicDistribution_gen(stats.rv_continuous): \"\"\" deterministic distribution that always returns a given value Code", "estimate the parameters. All values can also be numpy arrays to represent many", "= (x > 0) if np.any(nz): if self.method == 'sum': factor = np.exp(-x[nz,", "returns random variables that a distributed uniformly in log space \"\"\" log_min, log_max", "probability function \"\"\" s = s[0] # reset broadcasting res = np.zeros_like(x) idx", "other, common_variance=True): \"\"\" estimates the amount of overlap between two distributions \"\"\" if", "value): \"\"\" add an observed value and adjust mean and variance of the", "_, _ = self._parse_args_stats(*args, **kwds) mean = self.mean(*args, **kwds) scale = extra_args[0] width", "be used to choose a definition of the resulting parameters that is suitable", "random variates \"\"\" # choose the receptor response characteristics res = random_log_uniform(1/s, s,", "other): \"\"\" performs Welch's t-test of two normal distributions \"\"\" # calculate the", "np.diag(-self.rates, 0) + np.diag(self.rates[:-1], 1) res = 1 - linalg.expm(x*Theta)[0, :].sum() return res", "axis=1) else: Theta = (np.diag(-self.rates, 0) + np.diag(self.rates[:-1], 1)) for i in np.flatnonzero(nz):", "np.log(0.25*(var_ratio + 1/var_ratio + 2)) term2 = (self.mean - other.mean)**2/(self.var + other.var) dist", "method == 'fenton': # use the moments directly return lognorm_mean_var(count * mean, count", "- np.sum(self._terms * factor) return res # ============================================================================== # OLD DISTRIBUTIONS THAT MIGHT", "s, frac): \"\"\" percent point function (inverse of cdf) \"\"\" s, frac =", "def _rvs(self, s, frac): \"\"\" random variates \"\"\" # choose the items response", "cumulative probability function \"\"\" s = s[0] # reset broadcasting res = np.zeros_like(x)", "welch_test(self, other): \"\"\" performs Welch's t-test of two normal distributions \"\"\" # calculate", "< s) res[idx] = 1/(x[idx] * np.log(s*s)) return res def _cdf(self, x, s):", "< s) log_s = np.log(s) res[idx] = (log_s + np.log(x[idx]))/(2 * log_s) res[x", "hypoexponential distribution. 
`rates` are the rates of the underlying exponential processes `method` determines", "- 1) - 1 - cv2 width = optimize.newton(_rhs, 1.1) return loguniform_mean(mean, np.sqrt(width))", "((self.count - 1)*self.var + (other.count - 1)*other.var) S = np.sqrt(expr/(self.count + other.count -", "= mean * np.exp(-0.5 * sigma**2) return stats.lognorm(scale=mu, s=sigma) def lognorm_mean_var(mean, variance): \"\"\"", "the distribution \"\"\" mean = (s**2 - 1)/(2*s*np.log(s)) var = ((s**4 - 1)", "delta/count M2 = M2 + delta*(value - mean) return NormalDistribution(mean, M2/(count - 1),", "count * variance) elif method == 'leastsq': # determine the moments from fitting", "frac != 1: res[np.random.random(self._size) > frac] = 0 return res def _pdf(self, x,", "cdf) \"\"\" s = s[0] # reset broadcasting res = np.zeros_like(q) idx =", "mean, var, None, None LogUniformDistribution = LogUniformDistribution_gen( a=0, name='LogUniformDistribution' ) class HypoExponentialDistribution(object): \"\"\"", "the rates of the underlying exponential processes `method` determines what method is used", "the distance between two normal distributions \"\"\" if kind == 'kullback-leibler': dist =", "= 1 self._terms = np.prod(mat, 1) def rvs(self, size): \"\"\" random variates \"\"\"", "self.mean)**2)/other.var - 1) elif kind == 'bhattacharyya': var_ratio = self.var/other.var term1 = np.log(0.25*(var_ratio", "sigma == 0: return DeterministicDistribution(mean) else: mu = mean * np.exp(-0.5 * sigma**2)", "!= 1: # switch off items randomly res[np.random.random(self._size) > frac] = 0 return", "`width`. The ratio between the maximal value and the minimal value is given", "a=0, name='PartialLogNormDistribution' ) class PartialLogUniformDistribution_gen(stats.rv_continuous): \"\"\" partial log-uniform distribution. a fraction `frac` of", "term2 = (self.mean - other.mean)**2/(self.var + other.var) dist = 0.25*(term1 + term2) elif", "# reset broadcasting res = np.zeros_like(x) idx = (1 < x*s) & (x", "0 return res def _pdf(self, x, s, frac): \"\"\" probability density function \"\"\"", "else: # other is sampled S = self.std else: if other.count is None:", "- self.mean)**2)/other.var - 1) elif kind == 'bhattacharyya': var_ratio = self.var/other.var term1 =", "s[0], frac[0] # reset broadcasting q_scale = (q - (1 - frac)) /", "= self.var/self.count, other.var/other.count nu1, nu2 = self.count - 1, other.count - 1 dof", "s = s[0] # reset broadcasting res = np.zeros_like(q) idx = (q >", "res # ============================================================================== # OLD DISTRIBUTIONS THAT MIGHT NOT BE NEEDED ANYMORE #", "match the coefficient of variation \"\"\" return 0.5 * (q + 1) *", "and adjust mean and variance of the distribution. 
def lognorm_mean(mean, sigma):
    """ returns a lognormal distribution parameterized by its mean and a spread
    parameter `sigma` """
    if sigma == 0:
        return DeterministicDistribution(mean)
    else:
        mu = mean * np.exp(-0.5 * sigma**2)
        return stats.lognorm(scale=mu, s=sigma)


def lognorm_mean_var(mean, variance):
    """ returns a lognormal distribution parameterized by its mean and its
    variance. """
    if variance == 0:
        return DeterministicDistribution(mean)
    else:
        scale, sigma = lognorm_mean_var_to_mu_sigma(mean, variance, 'scipy')
        return stats.lognorm(scale=scale, s=sigma)
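# Illustrative sketch (hypothetical helper, not part of the original module):
# `lognorm_mean` fixes the mean while `sigma` only controls the spread, so the
# mean of the returned distribution equals the requested one for any `sigma`.
def _demo_lognorm_mean(mean=3.0, sigma=0.7):
    dist = lognorm_mean(mean, sigma)
    assert np.isclose(dist.mean(), mean)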
def lognorm_sum_leastsq(count, var_norm, sim_terms=1e5, bins=64):
    """ returns the parameters of a log-normal distribution that estimates the
    sum of `count` log-normally distributed random variables with mean 1 and
    variance `var_norm`. These parameters are determined by fitting the
    probability density function to a histogram obtained by drawing `sim_terms`
    random numbers """
    sum_mean = count
    sum_var = count * var_norm

    # get random numbers
    dist = lognorm_mean_var(1, var_norm)
    vals = dist.rvs((int(sim_terms), count)).sum(axis=1)

    # get the histogram
    val_max = sum_mean + 3 * np.sqrt(sum_var)
    bins = np.linspace(0, val_max, bins + 1)
    xs = 0.5*(bins[:-1] + bins[1:])
    density, _ = np.histogram(vals, bins=bins, range=[0, val_max],
                              density=True)

    def pdf_diff(params):
        """ evaluate the estimated pdf """
        scale, sigma = params
        return stats.lognorm.pdf(xs, scale=scale, s=sigma) - density

    # do the least square fitting
    params_init = lognorm_mean_var_to_mu_sigma(sum_mean, sum_var, 'scipy')
    params, _ = optimize.leastsq(pdf_diff, params_init)
    return params
def lognorm_sum(count, mean, variance, method='fenton'):
    """ returns an estimate of the distribution of the sum of `count`
    log-normally distributed variables with `mean` and `variance`. The returned
    distribution is again log-normal with mean and variance determined from the
    given parameters. Here, several methods can be used:
        `fenton` - match the first two moments of the distribution
        `leastsq` - minimize the error in the interval
    """
    if method == 'fenton':
        # use the moments directly
        return lognorm_mean_var(count * mean, count * variance)

    elif method == 'leastsq':
        # determine the moments from fitting
        var_norm = variance / mean**2
        scale, sigma = lognorm_sum_leastsq(count, var_norm)
        return stats.lognorm(scale=scale * mean, s=sigma)

    else:
        raise ValueError('Unknown method `%s` for determining the sum of '
                         'lognormal distributions. Accepted methods are '
                         '[`fenton`, `leastsq`].')
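# Illustrative sketch (hypothetical helper): the `fenton` method matches the
# first two moments of the sum exactly, which we can verify directly.
def _demo_lognorm_sum(count=10, mean=1.0, variance=0.25):
    dist = lognorm_sum(count, mean, variance, method='fenton')
    assert np.isclose(dist.mean(), count * mean)
    assert np.isclose(dist.var(), count * variance)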
def gamma_mean_var(mean, variance):
    """ returns a gamma distribution with given mean and variance """
    alpha = mean**2 / variance
    beta = variance / mean
    return stats.gamma(scale=beta, a=alpha)


def loguniform_mean(mean, width):
    """ returns a loguniform distribution parameterized by its mean and a
    spread parameter `width`. The ratio between the maximal value and the
    minimal value is given by width**2 """
    if width == 1:
        # treat special case separately
        return DeterministicDistribution(mean)
    else:
        scale = mean * (2*width*np.log(width)) / (width**2 - 1)
        return LogUniformDistribution(scale=scale, s=width)
def loguniform_mean_var(mean, var):
    """ returns a loguniform distribution parameterized by its mean and
    variance. Here, we need to solve a non-linear equation numerically, which
    might degrade accuracy and performance of the result """
    if var < 0:
        raise ValueError('Variance must be positive')
    elif var == 0:
        # treat special case separately
        return DeterministicDistribution(mean)
    else:
        # determine the width parameter numerically
        cv2 = var / mean**2  # match the square coefficient of variation

        def _rhs(q):
            """ match the coefficient of variation """
            return 0.5 * (q + 1) * np.log(q) / (q - 1) - 1 - cv2

        width = optimize.newton(_rhs, 1.1)
        return loguniform_mean(mean, np.sqrt(width))
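# Illustrative sketch (hypothetical helper): with `alpha = mean**2/variance`
# and `scale = variance/mean`, the gamma distribution returned by
# `gamma_mean_var` above reproduces both requested moments.
def _demo_gamma_mean_var(mean=4.0, variance=2.0):
    dist = gamma_mean_var(mean, variance)
    assert np.isclose(dist.mean(), mean)
    assert np.isclose(dist.var(), variance)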
def random_log_uniform(v_min, v_max, size):
    """ returns random variables that are distributed uniformly in log space """
    log_min, log_max = np.log(v_min), np.log(v_max)
    res = np.random.uniform(log_min, log_max, size)
    return np.exp(res)


def dist_skewness(dist):
    """ returns the skewness of the distribution `dist` """
    mean = dist.mean()
    var = dist.var()
    return (dist.moment(3) - 3*mean*var - mean**3) / var**(3/2)
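# Illustrative sketch (hypothetical helper): samples of `random_log_uniform`
# are uniform in log space, so their logarithms stay inside the log interval
# and average to its midpoint.
def _demo_random_log_uniform(v_min=0.1, v_max=10.0, size=100000):
    samples = random_log_uniform(v_min, v_max, size)
    assert np.all((v_min <= samples) & (samples <= v_max))
    log_mid = 0.5 * (np.log(v_min) + np.log(v_max))
    assert np.isclose(np.log(samples).mean(), log_mid, atol=0.05)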
class DeterministicDistribution_gen(stats.rv_continuous):
    """ deterministic distribution that always returns a given value
    Code copied from
    https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html#making-a-continuous-distribution-i-e-subclassing-rv-continuous
    """

    def _cdf(self, x):
        return np.where(x < 0, 0., 1.)

    def _stats(self):
        return 0., 0., 0., 0.

    def _rvs(self):
        return np.zeros(self._size)


DeterministicDistribution = DeterministicDistribution_gen(
    name='DeterministicDistribution'
)
class LogUniformDistribution_gen(stats.rv_continuous):
    """ Log-uniform distribution. """

    def freeze(self, *args, **kwds):
        frozen = super(LogUniformDistribution_gen, self).freeze(*args, **kwds)
        frozen.support = self.support(*args, **kwds)
        return frozen

    def support(self, *args, **kwds):
        """ return the interval in which the PDF of the distribution is
        non-zero """
        extra_args, _, _, _ = self._parse_args_stats(*args, **kwds)
        mean = self.mean(*args, **kwds)
        scale = extra_args[0]
        width = mean * (2*scale*np.log(scale)) / (scale**2 - 1)
        return (width / scale, width * scale)

    def _rvs(self, s):
        """ random variates """
        # choose the receptor response characteristics
        return random_log_uniform(1/s, s, self._size)

    def _pdf(self, x, s):
        """ probability density function """
        s = s[0]  # reset broadcasting
        res = np.zeros_like(x)
        idx = (1 < x*s) & (x < s)
        res[idx] = 1/(x[idx] * np.log(s*s))
        return res

    def _cdf(self, x, s):
        """ cumulative probability function """
        s = s[0]  # reset broadcasting
        res = np.zeros_like(x)
        idx = (1 < x*s) & (x < s)
        log_s = np.log(s)
        res[idx] = (log_s + np.log(x[idx]))/(2 * log_s)
        res[x > s] = 1
        return res

    def _ppf(self, q, s):
        """ percent point function (inverse of cdf) """
        s = s[0]  # reset broadcasting
        res = np.zeros_like(q)
        idx = (q > 0)
        res[idx] = s**(2*q[idx] - 1)
        return res

    def _stats(self, s):
        """ calculates statistics of the distribution """
        mean = (s**2 - 1)/(2*s*np.log(s))
        var = ((s**4 - 1) * np.log(s) - (s**2 - 1)**2) \
            / (4 * s**2 * np.log(s)**2)
        return mean, var, None, None


LogUniformDistribution = LogUniformDistribution_gen(
    a=0, name='LogUniformDistribution'
)
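# Illustrative sketch (hypothetical helper; relies on the custom `freeze`
# above attaching a `support` attribute to the frozen distribution): the ratio
# of the support boundaries equals s**2, matching the `loguniform_mean`
# docstring.
def _demo_log_uniform_support(s=2.0):
    dist = LogUniformDistribution(scale=1.0, s=s)
    v_min, v_max = dist.support
    assert np.isclose(v_max / v_min, s**2)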
class HypoExponentialDistribution(object):
    """ Hypoexponential distribution.
    Unfortunately, the framework supplied by scipy.stats.rv_continuous does not
    support a variable number of parameters and we thus only mimic its
    interface here. """

    def __init__(self, rates, method='sum'):
        """ initializes the hypoexponential distribution.
        `rates` are the rates of the underlying exponential processes.
        `method` determines what method is used for calculating the cdf and can
        be either `sum` or `eigen` """
        if method in {'sum', 'eigen'}:
            self.method = method
        else:
            raise ValueError('`method` must be either `sum` or `eigen`')

        # prepare the rates of the system
        self.rates = np.asarray(rates)
        self.alpha = 1 / self.rates
        if np.any(rates <= 0):
            raise ValueError('All rates must be positive')
        if len(np.unique(self.alpha)) != len(self.alpha):
            raise ValueError('The current implementation only supports cases '
                             'where all rates are different from each other.')

        # calculate terms that we need later
        with np.errstate(divide='ignore'):
            mat = self.alpha[:, None] \
                / (self.alpha[:, None] - self.alpha[None, :])
        mat[(self.alpha[:, None] - self.alpha[None, :]) == 0] = 1
        self._terms = np.prod(mat, 1)

    def rvs(self, size):
        """ random variates """
        return sum(np.random.exponential(scale=alpha, size=size)
                   for alpha in self.alpha)

    def mean(self):
        """ mean of the distribution """
        return self.alpha.sum()

    def variance(self):
        """ variance of the distribution """
        return (2 * np.sum(self.alpha**2 * self._terms)
                - (self.alpha.sum())**2)

    def pdf(self, x):
        """ probability density function """
        if not np.isscalar(x):
            x = np.asarray(x)
            res = np.zeros_like(x)
            nz = (x > 0)
            if np.any(nz):
                factor = np.exp(-x[nz, None] * self.rates[..., :])
                res[nz] = np.sum(self._terms[..., :] * self.rates[..., :]
                                 * factor, axis=1)
        elif x <= 0:
            res = 0
        else:
            factor = np.exp(-x*self.rates)
            res = np.sum(self._terms * self.rates * factor)
        return res

    def cdf(self, x):
        """ cumulative density function """
        if not np.isscalar(x):
            x = np.asarray(x)
            res = np.zeros_like(x)
            nz = (x > 0)
            if np.any(nz):
                if self.method == 'sum':
                    factor = np.exp(-x[nz, None] * self.rates[..., :])
                    res[nz] = 1 - np.sum(self._terms[..., :] * factor,
                                         axis=1)
                else:
                    Theta = (np.diag(-self.rates, 0)
                             + np.diag(self.rates[:-1], 1))
                    for i in np.flatnonzero(nz):
                        res.flat[i] = \
                            1 - linalg.expm(x.flat[i]*Theta)[0, :].sum()
        elif x <= 0:
            res = 0
        else:
            if self.method == 'sum':
                factor = np.exp(-x*self.rates)
                res = 1 - np.sum(self._terms * factor)
            else:
                Theta = np.diag(-self.rates, 0) + np.diag(self.rates[:-1], 1)
                res = 1 - linalg.expm(x*Theta)[0, :].sum()
        return res
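# Illustrative sketch (hypothetical helper): the mean of a hypoexponential
# variable is the sum of the means 1/rate of its exponential stages.
def _demo_hypo_exponential(rates=(1.0, 2.0, 4.0)):
    dist = HypoExponentialDistribution(rates, method='sum')
    assert np.isclose(dist.mean(), sum(1/r for r in rates))
    assert np.isclose(dist.cdf(0), 0)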
# ==============================================================================
# OLD DISTRIBUTIONS THAT MIGHT NOT BE NEEDED ANYMORE
# ==============================================================================


class PartialLogNormDistribution_gen(stats.rv_continuous):
    """ partial log-normal distribution.
    a fraction `frac` of the distribution follows a log-normal distribution,
    while the remaining fraction `1 - frac` is zero
    Similar to the lognorm distribution, this does not support any location
    parameter """

    def _rvs(self, s, frac):
        """ random variates """
        # choose the items response characteristics
        res = np.exp(s * np.random.standard_normal(self._size))
        if frac != 1:
            # switch off items randomly
            res[np.random.random(self._size) > frac] = 0
        return res

    def _pdf(self, x, s, frac):
        """ probability density function """
        s, frac = s[0], frac[0]  # reset broadcasting
        return frac / (s*x*np.sqrt(2*np.pi)) * np.exp(-1/2*(np.log(x)/s)**2)

    def _cdf(self, x, s, frac):
        """ cumulative probability function """
        s, frac = s[0], frac[0]  # reset broadcasting
        return 1 + frac*(-0.5 + 0.5*special.erf(np.log(x)/(s*np.sqrt(2))))

    def _ppf(self, q, s, frac):
        """ percent point function (inverse of cdf) """
        s, frac = s[0], frac[0]  # reset broadcasting
        q_scale = (q - (1 - frac)) / frac
        res = np.zeros_like(q)
        idx = (q_scale > 0)
        res[idx] = np.exp(s * special.ndtri(q_scale[idx]))
        return res


PartialLogNormDistribution = PartialLogNormDistribution_gen(
    a=0, name='PartialLogNormDistribution'
)
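# Illustrative sketch (hypothetical helper; assumes the `rvs` call signature
# of the scipy version this module targets): a fraction `1 - frac` of the
# samples of the partial distributions is exactly zero.
def _demo_partial_lognorm(s=0.5, frac=0.3, size=10000):
    samples = PartialLogNormDistribution.rvs(s, frac, size=size)
    zero_fraction = np.mean(samples == 0)
    assert np.isclose(zero_fraction, 1 - frac, atol=0.05)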
This returns a new", "- 1), count) def distance(self, other, kind='kullback-leibler'): \"\"\" return the distance between two", "0., 1.) def _stats(self): return 0., 0., 0., 0. def _rvs(self): return np.zeros(self._size)", "# choose the receptor response characteristics return random_log_uniform(1/s, s, self._size) def _pdf(self, x,", "return np.exp(res) def dist_skewness(dist): \"\"\" returns the skewness of the distribution `dist` \"\"\"", "log-uniform distribution. a fraction `frac` of the distribution follows a log-uniform distribution, while", "nz = (x > 0) if np.any(nz): if self.method == 'sum': factor =", "broadcasting return frac / (s*x*np.sqrt(2*np.pi)) * np.exp(-1/2*(np.log(x)/s)**2) def _cdf(self, x, s, frac): \"\"\"", "value \"\"\" if mask is None: mean = self.mean var = self.var std", "np.sqrt(0.5*(self.var + other.var)) else: # other is sampled S = self.std else: if", "0: return DeterministicDistribution(mean) else: mu = mean * np.exp(-0.5 * sigma**2) return stats.lognorm(scale=mu,", "NormalDistribution(object): \"\"\" class representing normal distributions \"\"\" def __init__(self, mean, var, count=None): \"\"\"", "loguniform_mean(mean, width): \"\"\" returns a loguniform distribution parameterized by its mean and a", "numpy arrays to represent many distributions efficiently \"\"\" self.mean = mean self.var =", "log space \"\"\" log_min, log_max = np.log(v_min), np.log(v_max) res = np.random.uniform(log_min, log_max, size)", "1: # treat special case separately return DeterministicDistribution(mean) else: scale = mean *", "distance `%s`' % kind) return dist def welch_test(self, other): \"\"\" performs Welch's t-test", "its mean and variance. Here, we need to solve a non-linear equation numerically,", "prob = stats.t.sf(np.abs(t), dof) * 2 return prob def overlap(self, other, common_variance=True): \"\"\"", "\"\"\" evaluate the estimated pdf \"\"\" scale, sigma = params return stats.lognorm.pdf(xs, scale=scale,", "characteristics return sum(np.random.exponential(scale=alpha, size=size) for alpha in self.alpha) def mean(self): \"\"\" mean of", "+ (other.count - 1)*other.var) S = np.sqrt(expr/(self.count + other.count - 2)) delta =", "sigma = lognorm_mean_var_to_mu_sigma(mean, variance, 'scipy') return stats.lognorm(scale=scale, s=sigma) def lognorm_sum_leastsq(count, var_norm, sim_terms=1e5, bins=64):", "if method == 'fenton': # use the moments directly return lognorm_mean_var(count * mean,", "q, s): \"\"\" percent point function (inverse of cdf) \"\"\" s = s[0]", "numbers \"\"\" sum_mean = count sum_var = count * var_norm # get random", "def mean(self): \"\"\" mean of the distribution \"\"\" return self.alpha.sum() def variance(self): \"\"\"", "reset broadcasting return 1 + frac*(-0.5 + 0.5*special.erf(np.log(x)/(s*np.sqrt(2)))) def _ppf(self, q, s, frac):", "DeterministicDistribution(mean) else: mu = mean * np.exp(-0.5 * sigma**2) return stats.lognorm(scale=mu, s=sigma) def", "its mean and a spread parameter `width`. The ratio between the maximal value", "\"\"\" if width == 1: # treat special case separately return DeterministicDistribution(mean) else:", "s2) # calculate the probability using the Student's T distribution prob = stats.t.sf(np.abs(t),", "self.method == 'sum': factor = np.exp(-x*self.rates)/self.ratesx res[nz] = np.sum(self._terms * factor) else: Theta", "factor = np.exp(-x*self.rates) res = 1 - np.sum(self._terms * factor) return res #", "package and extend it. 
''' from __future__ import division import numpy as np", "by width**2 \"\"\" if width == 1: # treat special case separately return", "distribution follows a log-normal distribution, while the remaining fraction `1 - frac` is", "else: if self.method == 'sum': factor = np.exp(-x*self.rates)/self.ratesx res[nz] = np.sum(self._terms * factor)", "the receptor response characteristics return sum(np.random.exponential(scale=alpha, size=size) for alpha in self.alpha) def mean(self):", "1 + frac*(-0.5 + 0.5*special.erf(np.log(x)/(s*np.sqrt(2)))) def _ppf(self, q, s, frac): \"\"\" percent point", "\"\"\" random variates \"\"\" # choose the receptor response characteristics return random_log_uniform(1/s, s,", "kind == 'kullback-leibler': dist = 0.5*(np.log(other.var/self.var) + (self.var + (self.mean - self.mean)**2)/other.var -", "other is sampled S = self.std else: if other.count is None: # self", "factor = np.exp(-x[nz, None] * self.rates[..., :]) \\ / self.rates[..., :] res[nz] =", "(log_s + np.log(x[idx]))/(2 * log_s) res[x > s] = 1 return res def", "upon the scipy.stats package and extend it. ''' from __future__ import division import", "# reset broadcasting res = np.zeros_like(q) idx = (q > 0) res[idx] =", "s): \"\"\" percent point function (inverse of cdf) \"\"\" s = s[0] #", "distribution `leastsq` - minimize the error in the interval \"\"\" if method ==", "= count def copy(self): return self.__class__(self.mean, self.var, self.count) @cached_property() def std(self): \"\"\" return", "performance of the result \"\"\" if var < 0: raise ValueError('Variance must be", "= self.support(*args, **kwds) return frozen def support(self, *args, **kwds): \"\"\" return the interval", "value and the minimal value is given by width**2 \"\"\" if width ==", "MIGHT NOT BE NEEDED ANYMORE # ============================================================================== class PartialLogNormDistribution_gen(stats.rv_continuous): \"\"\" partial log-normal distribution.", "= 1 / self.rates if np.any(rates <= 0): raise ValueError('All rates must be", "= mean self.var = var self.count = count def copy(self): return self.__class__(self.mean, self.var,", "`method` determines what method is used for calculating the cdf and can be", "= self.mean(*args, **kwds) scale = extra_args[0] width = mean * (2*scale*np.log(scale)) / (scale**2", "# use the moments directly return lognorm_mean_var(count * mean, count * variance) elif", "to solve a non-linear equation numerically, which might degrade accuracy and performance of", "0.5*(np.log(other.var/self.var) + (self.var + (self.mean - self.mean)**2)/other.var - 1) elif kind == 'bhattacharyya':", "class PartialLogUniformDistribution_gen(stats.rv_continuous): \"\"\" partial log-uniform distribution. 
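
# A minimal usage sketch (added for illustration; `_demo_lognorm_mean_var` is
# not part of the original module): the parametrization above implies that
# the returned distribution reproduces the requested moments exactly.
def _demo_lognorm_mean_var():
    """ check that the returned distribution matches the requested moments """
    dist = lognorm_mean_var(2.0, 0.5)
    assert np.isclose(dist.mean(), 2.0)
    assert np.isclose(dist.var(), 0.5)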

def lognorm_sum_leastsq(count, var_norm, sim_terms=1e5, bins=64):
    """ returns the parameters of a log-normal distribution that estimates the
    sum of `count` log-normally distributed random variables with mean 1 and
    variance `var_norm`. These parameters are determined by fitting the
    probability density function to a histogram obtained by drawing
    `sim_terms` random numbers """
    sum_mean = count
    sum_var = count * var_norm

    # get random numbers
    dist = lognorm_mean_var(1, var_norm)
    vals = dist.rvs((int(sim_terms), count)).sum(axis=1)

    # get the histogram
    val_max = sum_mean + 3 * np.sqrt(sum_var)
    bins = np.linspace(0, val_max, bins + 1)
    xs = 0.5*(bins[:-1] + bins[1:])
    density, _ = np.histogram(vals, bins=bins, range=[0, val_max],
                              density=True)

    def pdf_diff(params):
        """ evaluate the estimated pdf """
        scale, sigma = params
        return stats.lognorm.pdf(xs, scale=scale, s=sigma) - density

    # do the least square fitting
    params_init = lognorm_mean_var_to_mu_sigma(sum_mean, sum_var, 'scipy')
    params, _ = optimize.leastsq(pdf_diff, params_init)
    return params


def lognorm_sum(count, mean, variance, method='fenton'):
    """ returns an estimate of the distribution of the sum of `count`
    log-normally distributed variables with `mean` and `variance`. The
    returned distribution is again log-normal with mean and variance
    determined from the given parameters. Here, several methods can be used:
        `fenton` - match the first two moments of the distribution
        `leastsq` - minimize the error in the interval
    """
    if method == 'fenton':
        # use the moments directly
        return lognorm_mean_var(count * mean, count * variance)

    elif method == 'leastsq':
        # determine the moments from fitting
        var_norm = variance / mean**2
        scale, sigma = lognorm_sum_leastsq(count, var_norm)
        return stats.lognorm(scale=scale * mean, s=sigma)

    else:
        raise ValueError('Unknown method `%s` for determining the sum of '
                         'lognormal distributions. Accepted methods are '
                         '[`fenton`, `leastsq`].')
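
# A minimal usage sketch (illustrative only; `_demo_lognorm_sum` is not part
# of the original module): Fenton's method matches the exact mean and
# variance of the sum of independent terms.
def _demo_lognorm_sum():
    dist = lognorm_sum(10, mean=1.0, variance=0.25, method='fenton')
    assert np.isclose(dist.mean(), 10 * 1.0)
    assert np.isclose(dist.var(), 10 * 0.25)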

def gamma_mean_var(mean, variance):
    """ returns a gamma distribution with given mean and variance """
    alpha = mean**2 / variance
    beta = variance / mean
    return stats.gamma(scale=beta, a=alpha)


def loguniform_mean(mean, width):
    """ returns a loguniform distribution parameterized by its mean and a
    spread parameter `width`. The ratio between the maximal value and the
    minimal value is given by width**2 """
    if width == 1:
        # treat special case separately
        return DeterministicDistribution(mean)
    else:
        scale = mean * (2*width*np.log(width)) / (width**2 - 1)
        return LogUniformDistribution(scale=scale, s=width)


def loguniform_mean_var(mean, var):
    """ returns a loguniform distribution parameterized by its mean and
    variance. Here, we need to solve a non-linear equation numerically, which
    might degrade accuracy and performance of the result """
    if var < 0:
        raise ValueError('Variance must be positive')
    elif var == 0:
        # treat special case separately
        return DeterministicDistribution(mean)
    else:
        # determine width parameter numerically
        cv2 = var / mean**2  # match square coefficient of variation

        def _rhs(q):
            """ match the coefficient of variation """
            return 0.5 * (q + 1) * np.log(q) / (q - 1) - 1 - cv2

        width = optimize.newton(_rhs, 1.1)
        return loguniform_mean(mean, np.sqrt(width))


def random_log_uniform(v_min, v_max, size):
    """ returns random variables that are distributed uniformly in log
    space """
    log_min, log_max = np.log(v_min), np.log(v_max)
    res = np.random.uniform(log_min, log_max, size)
    return np.exp(res)


def dist_skewness(dist):
    """ returns the skewness of the distribution `dist` """
    mean = dist.mean()
    var = dist.var()
    return (dist.moment(3) - 3*mean*var - mean**3) / var**(3/2)

class DeterministicDistribution_gen(stats.rv_continuous):
    """ deterministic distribution that always returns a given value
    Code copied from
    https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html#making-a-continuous-distribution-i-e-subclassing-rv-continuous
    """

    def _cdf(self, x):
        return np.where(x < 0, 0., 1.)

    def _stats(self):
        return 0., 0., 0., 0.

    def _rvs(self):
        return np.zeros(self._size)


DeterministicDistribution = DeterministicDistribution_gen(
    name='DeterministicDistribution'
)


class LogUniformDistribution_gen(stats.rv_continuous):
    """
    Log-uniform distribution.
    """

    def freeze(self, *args, **kwds):
        frozen = super(LogUniformDistribution_gen, self).freeze(*args, **kwds)
        frozen.support = self.support(*args, **kwds)
        return frozen

    def support(self, *args, **kwds):
        """ return the interval in which the PDF of the distribution is
        non-zero """
        extra_args, _, _, _ = self._parse_args_stats(*args, **kwds)
        mean = self.mean(*args, **kwds)
        scale = extra_args[0]
        width = mean * (2*scale*np.log(scale)) / (scale**2 - 1)
        return (width / scale, width * scale)

    def _rvs(self, s):
        """ random variates """
        # choose the receptor response characteristics
        return random_log_uniform(1/s, s, self._size)

    def _pdf(self, x, s):
        """ probability density function """
        s = s[0]  # reset broadcasting
        res = np.zeros_like(x)
        idx = (1 < x*s) & (x < s)
        res[idx] = 1/(x[idx] * np.log(s*s))
        return res

    def _cdf(self, x, s):
        """ cumulative probability function """
        s = s[0]  # reset broadcasting
        res = np.zeros_like(x)
        idx = (1 < x*s) & (x < s)
        log_s = np.log(s)
        res[idx] = (log_s + np.log(x[idx]))/(2 * log_s)
        res[x > s] = 1
        return res

    def _ppf(self, q, s):
        """ percent point function (inverse of cdf) """
        s = s[0]  # reset broadcasting
        res = np.zeros_like(q)
        idx = (q > 0)
        res[idx] = s**(2*q[idx] - 1)
        return res

    def _stats(self, s):
        """ calculates statistics of the distribution """
        mean = (s**2 - 1)/(2*s*np.log(s))
        var = ((s**4 - 1) * np.log(s) - (s**2 - 1)**2) \
            / (4 * s**2 * np.log(s)**2)
        return mean, var, None, None


LogUniformDistribution = LogUniformDistribution_gen(
    a=0, name='LogUniformDistribution'
)
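
# A minimal usage sketch (illustrative only; `_demo_loguniform_support` is
# not part of the original module, and it relies on the same private scipy
# hooks that `support` above already uses): the overridden `freeze` attaches
# the support interval, whose endpoints differ by a factor of s**2.
def _demo_loguniform_support():
    dist = loguniform_mean(2.0, 3.0)
    lo, hi = dist.support
    assert np.isclose(hi / lo, 3.0**2)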

class HypoExponentialDistribution(object):
    """ Hypoexponential distribution.
    Unfortunately, the framework supplied by scipy.stats.rv_continuous does
    not support a variable number of parameters and we thus only mimic its
    interface here. """

    def __init__(self, rates, method='sum'):
        """ initializes the hypoexponential distribution.
        `rates` are the rates of the underlying exponential processes
        `method` determines what method is used for calculating the cdf and
            can be either `sum` or `eigen`
        """
        if method in {'sum', 'eigen'}:
            self.method = method

        # prepare the rates of the system
        self.rates = np.asarray(rates)
        self.alpha = 1 / self.rates
        if np.any(rates <= 0):
            raise ValueError('All rates must be positive')
        if len(np.unique(self.alpha)) != len(self.alpha):
            raise ValueError('The current implementation only supports cases '
                             'where all rates are different from each other.')

        # calculate terms that we need later
        with np.errstate(divide='ignore'):
            mat = self.alpha[:, None] \
                / (self.alpha[:, None] - self.alpha[None, :])
        mat[(self.alpha[:, None] - self.alpha[None, :]) == 0] = 1
        self._terms = np.prod(mat, 1)

    def rvs(self, size):
        """ random variates """
        # choose the receptor response characteristics
        return sum(np.random.exponential(scale=alpha, size=size)
                   for alpha in self.alpha)

    def mean(self):
        """ mean of the distribution """
        return self.alpha.sum()

    def variance(self):
        """ variance of the distribution """
        return (2 * np.sum(self.alpha**2 * self._terms)
                - (self.alpha.sum())**2)

    def pdf(self, x):
        """ probability density function """
        if not np.isscalar(x):
            x = np.asarray(x)
            res = np.zeros_like(x)
            nz = (x > 0)
            if np.any(nz):
                if self.method == 'sum':
                    factor = np.exp(-x[nz, None] * self.rates[..., :]) \
                        / self.rates[..., :]
                    res[nz] = np.sum(self._terms[..., :] * factor, axis=1)
                else:
                    Theta = (np.diag(-self.rates, 0)
                             + np.diag(self.rates[:-1], 1))
                    for i in np.flatnonzero(nz):
                        res.flat[i] = \
                            1 - linalg.expm(x.flat[i]*Theta)[0, :].sum()
        elif x == 0:
            res = 0
        else:
            if self.method == 'sum':
                factor = np.exp(-x*self.rates)/self.rates
                res = np.sum(self._terms * factor)
            else:
                Theta = np.diag(-self.rates, 0) + np.diag(self.rates[:-1], 1)
                res = 1 - linalg.expm(x*Theta)[0, :].sum()
        return res

    def cdf(self, x):
        """ cumulative density function """
        if not np.isscalar(x):
            x = np.asarray(x)
            res = np.zeros_like(x)
            nz = (x > 0)
            if np.any(nz):
                factor = np.exp(-x[nz, None]*self.rates[..., :])
                # assign only the entries with positive argument
                res[nz] = 1 - np.sum(self._terms[..., :] * factor, axis=1)
        elif x == 0:
            res = 0
        else:
            factor = np.exp(-x*self.rates)
            res = 1 - np.sum(self._terms * factor)
        return res
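
# A minimal usage sketch (illustrative only; `_demo_hypoexponential` is not
# part of the original class): the analytic cdf should agree with an
# empirical estimate obtained from `rvs`.
def _demo_hypoexponential():
    np.random.seed(0)
    dist = HypoExponentialDistribution([1.0, 2.0, 3.0])
    samples = dist.rvs(100000)
    x = dist.mean()
    assert abs(dist.cdf(x) - np.mean(samples <= x)) < 0.01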

# ==============================================================================
# OLD DISTRIBUTIONS THAT MIGHT NOT BE NEEDED ANYMORE
# ==============================================================================


class PartialLogNormDistribution_gen(stats.rv_continuous):
    """
    partial log-normal distribution.
    a fraction `frac` of the distribution follows a log-normal distribution,
    while the remaining fraction `1 - frac` is zero
    Similar to the lognorm distribution, this does not support any location
    parameter
    """

    def _rvs(self, s, frac):
        """ random variates """
        # choose the items response characteristics
        res = np.exp(s * np.random.standard_normal(self._size))
        if frac != 1:
            # switch off items randomly
            res[np.random.random(self._size) > frac] = 0
        return res

    def _pdf(self, x, s, frac):
        """ probability density function """
        s, frac = s[0], frac[0]  # reset broadcasting
        return frac / (s*x*np.sqrt(2*np.pi)) * np.exp(-1/2*(np.log(x)/s)**2)

    def _cdf(self, x, s, frac):
        """ cumulative probability function """
        s, frac = s[0], frac[0]  # reset broadcasting
        return 1 + frac*(-0.5 + 0.5*special.erf(np.log(x)/(s*np.sqrt(2))))

    def _ppf(self, q, s, frac):
        """ percent point function (inverse of cdf) """
        s, frac = s[0], frac[0]  # reset broadcasting
        q_scale = (q - (1 - frac)) / frac
        res = np.zeros_like(q)
        idx = (q_scale > 0)
        res[idx] = np.exp(s * special.ndtri(q_scale[idx]))
        return res


PartialLogNormDistribution = PartialLogNormDistribution_gen(
    a=0, name='PartialLogNormDistribution'
)


class PartialLogUniformDistribution_gen(stats.rv_continuous):
    """
    partial log-uniform distribution.
    a fraction `frac` of the distribution follows a log-uniform distribution,
    while the remaining fraction `1 - frac` is zero
    """

    def _rvs(self, s, frac):
        """ random variates """
        # choose the receptor response characteristics
        res = random_log_uniform(1/s, s, self._size)
        # switch off receptors randomly
        if frac != 1:
            res[np.random.random(self._size) > frac] = 0
        return res

    def _pdf(self, x, s, frac):
        """ probability density function """
        s, frac = s[0], frac[0]  # reset broadcasting
        res = np.zeros_like(x)
        idx = (1 < x*s) & (x < s)
        res[idx] = frac/(x[idx] * np.log(s*s))
        return res

    def _cdf(self, x, s, frac):
        """ cumulative probability function """
        s, frac = s[0], frac[0]  # reset broadcasting
        res = np.zeros_like(x)
        idx = (1 < x*s) & (x < s)
        log_s = np.log(s)
        res[idx] = (log_s + np.log(x[idx]))/(2 * log_s)
        res[x > s] = 1
        return (1 - frac) + frac*res

    def _ppf(self, q, s, frac):
        """ percent point function (inverse of cdf) """
        s, frac = s[0], frac[0]  # reset broadcasting
        q_scale = (q - (1 - frac)) / frac
        res = np.zeros_like(q)
        idx = (q_scale > 0)
        res[idx] = s**(2*q_scale[idx] - 1)
        return res


PartialLogUniformDistribution = PartialLogUniformDistribution_gen(
    a=0, name='PartialLogUniformDistribution'
)
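
# A minimal usage sketch (illustrative only; `_demo_partial_lognorm` is not
# part of the original module): the mixture puts weight `1 - frac` at zero,
# so the cdf just above zero is about `1 - frac` while it still approaches 1
# for large arguments.
def _demo_partial_lognorm():
    dist = PartialLogNormDistribution(s=1.0, frac=0.7)
    assert np.isclose(dist.cdf(1e-10), 0.3, atol=1e-3)
    assert np.isclose(dist.cdf(1e10), 1.0, atol=1e-3)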

NORMAL_DISTRIBUTION_NORMALIZATION = 1/np.sqrt(2*np.pi)


class NormalDistribution(object):
    """ class representing normal distributions """

    def __init__(self, mean, var, count=None):
        """ normal distributions are described by their mean and variance.
        Additionally, count denotes how many observations were used to
        estimate the parameters. All quantities can also be numpy arrays to
        represent many distributions efficiently """
        self.mean = mean
        self.var = var
        self.count = count

    def copy(self):
        return self.__class__(self.mean, self.var, self.count)

    @cached_property()
    def std(self):
        """ return standard deviation """
        return np.sqrt(self.var)

    def pdf(self, value, mask=None):
        """ return probability density function at value """
        if mask is None:
            mean = self.mean
            var = self.var
            std = self.std
        else:
            mean = self.mean[mask]
            var = self.var[mask]
            std = self.std[mask]

        return NORMAL_DISTRIBUTION_NORMALIZATION/std \
            * np.exp(-0.5*(value - mean)**2 / var)

    def add_observation(self, value):
        """ add an observed value and adjust mean and variance of the
        distribution. This returns a new distribution and only works if
        count was set """
        if self.count is None:
            return self.copy()
        else:
            M2 = self.var*(self.count - 1)
            count = self.count + 1
            delta = value - self.mean
            mean = self.mean + delta/count
            M2 = M2 + delta*(value - mean)
            return NormalDistribution(mean, M2/(count - 1), count)

    def distance(self, other, kind='kullback-leibler'):
        """ return the distance between two normal distributions """
        if kind == 'kullback-leibler':
            dist = 0.5*(np.log(other.var/self.var)
                        + (self.var + (self.mean - other.mean)**2)/other.var
                        - 1)

        elif kind == 'bhattacharyya':
            var_ratio = self.var/other.var
            term1 = np.log(0.25*(var_ratio + 1/var_ratio + 2))
            term2 = (self.mean - other.mean)**2/(self.var + other.var)
            dist = 0.25*(term1 + term2)

        elif kind == 'hellinger':
            dist_b = self.distance(other, kind='bhattacharyya')
            dist = np.sqrt(1 - np.exp(-dist_b))

        else:
            raise ValueError('Unknown distance `%s`' % kind)

        return dist

    def welch_test(self, other):
        """ performs Welch's t-test of two normal distributions """
        # calculate the degrees of freedom
        s1, s2 = self.var/self.count, other.var/other.count
        nu1, nu2 = self.count - 1, other.count - 1
        dof = (s1 + s2)**2/(s1**2/nu1 + s2**2/nu2)

        # calculate the Welch t-value
        t = (self.mean - other.mean)/np.sqrt(s1 + s2)

        # calculate the probability using the Student's T distribution
        prob = stats.t.sf(np.abs(t), dof) * 2
        return prob

    def overlap(self, other, common_variance=True):
        """ estimates the amount of overlap between two distributions """
        if common_variance:
            if self.count is None:
                if other.count is None:  # neither is sampled
                    S = np.sqrt(0.5*(self.var + other.var))
                else:  # other is sampled
                    S = self.std
            else:
                if other.count is None:  # self is sampled
                    S = other.std
                else:  # both are sampled
                    expr = ((self.count - 1)*self.var
                            + (other.count - 1)*other.var)
                    S = np.sqrt(expr/(self.count + other.count - 2))

            delta = np.abs(self.mean - other.mean)/S
            return 2*stats.norm.cdf(-0.5*delta)

        else:
            # here, we would have to integrate
            raise NotImplementedError
Here, we need to solve a non-linear equation numerically, which", "- mean**3) / var**(3/2) class DeterministicDistribution_gen(stats.rv_continuous): \"\"\" deterministic distribution that always returns a", "1)/(2*s*np.log(s)) var = ((s**4 - 1) * np.log(s) - (s**2 - 1)**2) \\", "distribution `dist` \"\"\" mean = dist.mean() var = dist.var() return (dist.moment(3) - 3*mean*var", ":]) == 0] = 1 self._terms = np.prod(mat, 1) def rvs(self, size): \"\"\"", "random variables that a distributed uniformly in log space \"\"\" log_min, log_max =", "s**(2*q_scale[idx] - 1) return res PartialLogUniformDistribution = PartialLogUniformDistribution_gen( a=0, name='PartialLogUniformDistribution' ) NORMAL_DISTRIBUTION_NORMALIZATION =", "= self.var[mask] std = self.std[mask] return NORMAL_DISTRIBUTION_NORMALIZATION/std \\ * np.exp(-0.5*(value - mean)**2 /", "- 1)/(2*s*np.log(s)) var = ((s**4 - 1) * np.log(s) - (s**2 - 1)**2)", "val_max, bins + 1) xs = 0.5*(bins[:-1] + bins[1:]) density, _ = np.histogram(vals,", "- np.exp(-dist_b)) else: raise ValueError('Unknown distance `%s`' % kind) return dist def welch_test(self,", "self).freeze(*args, **kwds) frozen.support = self.support(*args, **kwds) return frozen def support(self, *args, **kwds): \"\"\"", "and only works if count was set \"\"\" if self.count is None: return", "+ np.diag(self.rates[:-1], 1) res = 1 - linalg.expm(x*Theta)[0, :].sum() return res def cdf(self,", "scale=scale, s=sigma) - density # do the least square fitting params_init = lognorm_mean_var_to_mu_sigma(sum_mean,", "to choose a definition of the resulting parameters that is suitable for the", "variance) sigma = np.sqrt(np.log(1 + variance/mean2)) if definition == 'scipy': return mu, sigma", "treat special case separately return DeterministicDistribution(mean) else: # determine width parameter numerically cv2", "self.distance(other, kind='bhattacharyya') dist = np.sqrt(1 - np.exp(-dist_b)) else: raise ValueError('Unknown distance `%s`' %", "factor, axis=1) else: Theta = (np.diag(-self.rates, 0) + np.diag(self.rates[:-1], 1)) for i in", "- self.alpha[None, :]) mat[(self.alpha[:, None] - self.alpha[None, :]) == 0] = 1 self._terms", "_cdf(self, x, s): \"\"\" cumulative probability function \"\"\" s = s[0] # reset", "distributions are described by their mean and variance. Additionally, count denotes how many", "that a distributed uniformly in log space \"\"\" log_min, log_max = np.log(v_min), np.log(v_max)", "* np.exp(-0.5 * sigma**2) return stats.lognorm(scale=mu, s=sigma) def lognorm_mean_var(mean, variance): \"\"\" returns a", "0. def _rvs(self): return np.zeros(self._size) DeterministicDistribution = DeterministicDistribution_gen( name='DeterministicDistribution' ) class LogUniformDistribution_gen(stats.rv_continuous): \"\"\"", "of the distribution. This returns a new distribution and only works if count", "- 1 dof = (s1 + s2)**2/(s1**2/nu1 + s2**2/nu2) # calculate the Welch", "(self.mean - self.mean)**2)/other.var - 1) elif kind == 'bhattacharyya': var_ratio = self.var/other.var term1", "elif x == 0: res = 0 else: if self.method == 'sum': factor", "* np.log(s)**2) return mean, var, None, None LogUniformDistribution = LogUniformDistribution_gen( a=0, name='LogUniformDistribution' )", "sampled S = other.std else: # both are sampled expr = ((self.count -", "1)*self.var + (other.count - 1)*other.var) S = np.sqrt(expr/(self.count + other.count - 2)) delta", "a loguniform distribution parameterized by its mean and a spread parameter `width`. 
The", "by drawing `sim_terms` random numbers \"\"\" sum_mean = count sum_var = count *", "a gamma distribution with given mean and variance \"\"\" alpha = mean**2 /", "definition='scipy'): \"\"\" determines the parameters of the log-normal distribution such that the distribution", "`rates` are the rates of the underlying exponential processes `method` determines what method", "self.alpha.sum() def variance(self): \"\"\" variance of the distribution \"\"\" return (2 * np.sum(self.alpha**2", "= M2 + delta*(value - mean) return NormalDistribution(mean, M2/(count - 1), count) def", "def std(self): \"\"\" return standard deviation \"\"\" return np.sqrt(self.var) def pdf(self, value, mask=None):", "package. \"\"\" mean2 = mean**2 mu = mean2/np.sqrt(mean2 + variance) sigma = np.sqrt(np.log(1", "+ 1) xs = 0.5*(bins[:-1] + bins[1:]) density, _ = np.histogram(vals, bins=bins, range=[0,", "new distribution and only works if count was set \"\"\" if self.count is", "parameter `sigma` \"\"\" if sigma == 0: return DeterministicDistribution(mean) else: mu = mean", "used to estimate the parameters. All values can also be numpy arrays to", "of the distribution follows a log-normal distribution, while the remaining fraction `1 -", "the parameters of the log-normal distribution such that the distribution yields a given", "`sum` or `eigen` \"\"\" if method in {'sum', 'eigen'}: self.method = method #", "other.') # calculate terms that we need later with np.errstate(divide='ignore'): mat = self.alpha[:,", "cv2 = var / mean**2 # match square coefficient of variation def _rhs(q):", "function (inverse of cdf) \"\"\" s, frac = s[0], frac[0] # reset broadcasting", "def lognorm_mean_var_to_mu_sigma(mean, variance, definition='scipy'): \"\"\" determines the parameters of the log-normal distribution such", "copy(self): return self.__class__(self.mean, self.var, self.count) @cached_property() def std(self): \"\"\" return standard deviation \"\"\"", "such that the distribution yields a given mean and variance. 
The optional parameter", "s, self._size) # switch off receptors randomly if frac != 1: res[np.random.random(self._size) >", "kind == 'bhattacharyya': var_ratio = self.var/other.var term1 = np.log(0.25*(var_ratio + 1/var_ratio + 2))", "/ (q - 1) - 1 - cv2 width = optimize.newton(_rhs, 1.1) return", "= mean**2 / variance beta = variance / mean return stats.gamma(scale=beta, a=alpha) def", "a lognormal distribution parameterized by its mean and a spread parameter `sigma` \"\"\"", "processes `method` determines what method is used for calculating the cdf and can", "mat[(self.alpha[:, None] - self.alpha[None, :]) == 0] = 1 self._terms = np.prod(mat, 1)", "''' Created on Feb 24, 2015 @author: <NAME> <<EMAIL>> This module provides functions", "**kwds) scale = extra_args[0] width = mean * (2*scale*np.log(scale)) / (scale**2 - 1)", "choose the receptor response characteristics return sum(np.random.exponential(scale=alpha, size=size) for alpha in self.alpha) def", "\"\"\" probability density function \"\"\" if not np.isscalar(x): x = np.asarray(x) res =", "calculate the degrees of freedom s1, s2 = self.var/self.count, other.var/other.count nu1, nu2 =", "the coefficient of variation \"\"\" return 0.5 * (q + 1) * np.log(q)", "# both are sampled expr = ((self.count - 1)*self.var + (other.count - 1)*other.var)", "\"\"\" def _rvs(self, s, frac): \"\"\" random variates \"\"\" # choose the receptor", "class representing normal distributions \"\"\" def __init__(self, mean, var, count=None): \"\"\" normal distributions", "\"\"\" if self.count is None: return self.copy() else: M2 = self.var*(self.count - 1)", "s=sigma) - density # do the least square fitting params_init = lognorm_mean_var_to_mu_sigma(sum_mean, sum_var,", "np.zeros_like(x) idx = (1 < x*s) & (x < s) log_s = np.log(s)", "mean, s=sigma) else: raise ValueError('Unknown method `%s` for determining the sum of '", "= super(LogUniformDistribution_gen, self).freeze(*args, **kwds) frozen.support = self.support(*args, **kwds) return frozen def support(self, *args,", "standard deviation \"\"\" return np.sqrt(self.var) def pdf(self, value, mask=None): \"\"\" return probability density", "if np.any(nz): factor = np.exp(-x[nz, None]*self.rates[..., :]) res = 1 - np.sum(self._terms[..., :]", "\\ / (self.alpha[:, None] - self.alpha[None, :]) mat[(self.alpha[:, None] - self.alpha[None, :]) ==", "0, 0., 1.) def _stats(self): return 0., 0., 0., 0. def _rvs(self): return", "that we need later with np.errstate(divide='ignore'): mat = self.alpha[:, None] \\ / (self.alpha[:,", "mean and variance. Additionally, count denotes how many observations were used to estimate", "stats.t.sf(np.abs(t), dof) * 2 return prob def overlap(self, other, common_variance=True): \"\"\" estimates the", "use the moments directly return lognorm_mean_var(count * mean, count * variance) elif method", "fitting the probability density function to a histogram obtained by drawing `sim_terms` random", "to the lognorm distribution, this does not support any location parameter \"\"\" def", "distribution parameterized by its mean and its variance. 
\"\"\" if variance == 0:", "/ (s*x*np.sqrt(2*np.pi)) * np.exp(-1/2*(np.log(x)/s)**2) def _cdf(self, x, s, frac): \"\"\" cumulative probability function", "res def _cdf(self, x, s): \"\"\" cumulative probability function \"\"\" s = s[0]", "idx = (1 < x*s) & (x < s) res[idx] = 1/(x[idx] *", "np.prod(mat, 1) def rvs(self, size): \"\"\" random variates \"\"\" # choose the receptor", "lognorm_mean_var(1, var_norm) vals = dist.rvs((int(sim_terms), count)).sum(axis=1) # get the histogram val_max = sum_mean", "be either `sum` or `eigen` \"\"\" if method in {'sum', 'eigen'}: self.method =", "frac res = np.zeros_like(q) idx = (q_scale > 0) res[idx] = np.exp(s *", "histogram val_max = sum_mean + 3 * np.sqrt(sum_var) bins = np.linspace(0, val_max, bins", "self._size) # switch off receptors randomly if frac != 1: res[np.random.random(self._size) > frac]", "variance / mean return stats.gamma(scale=beta, a=alpha) def loguniform_mean(mean, width): \"\"\" returns a loguniform", "\"\"\" if sigma == 0: return DeterministicDistribution(mean) else: mu = mean * np.exp(-0.5", "factor) return res # ============================================================================== # OLD DISTRIBUTIONS THAT MIGHT NOT BE NEEDED", "a=0, name='LogUniformDistribution' ) class HypoExponentialDistribution(object): \"\"\" Hypoexponential distribution. Unfortunately, the framework supplied by", "mean and variance determined from the given parameters. Here, several methods can be", "cdf and can be either `sum` or `eigen` \"\"\" if method in {'sum',", "> s] = 1 return (1 - frac) + frac*res def _ppf(self, q,", "_stats(self, s): \"\"\" calculates statistics of the distribution \"\"\" mean = (s**2 -", "res def _pdf(self, x, s, frac): \"\"\" probability density function \"\"\" s, frac", "def _pdf(self, x, s, frac): \"\"\" probability density function \"\"\" s, frac =", "parameter numerically cv2 = var / mean**2 # match square coefficient of variation", "and classes for probability distributions, which build upon the scipy.stats package and extend", "* mean, count * variance) elif method == 'leastsq': # determine the moments", "efficiently \"\"\" self.mean = mean self.var = var self.count = count def copy(self):", "factor, axis=1) elif x == 0: res = 0 else: factor = np.exp(-x*self.rates)", "__init__(self, mean, var, count=None): \"\"\" normal distributions are described by their mean and", "function \"\"\" s, frac = s[0], frac[0] # reset broadcasting return 1 +", "in log space \"\"\" log_min, log_max = np.log(v_min), np.log(v_max) res = np.random.uniform(log_min, log_max,", "= optimize.leastsq(pdf_diff, params_init) return params def lognorm_sum(count, mean, variance, method='fenton'): \"\"\" returns an", "(s**2 - 1)**2) \\ / (4 * s**2 * np.log(s)**2) return mean, var,", "special.ndtri(q_scale[idx])) return res PartialLogNormDistribution = PartialLogNormDistribution_gen( a=0, name='PartialLogNormDistribution' ) class PartialLogUniformDistribution_gen(stats.rv_continuous): \"\"\" partial", "!= 1: res[np.random.random(self._size) > frac] = 0 return res def _pdf(self, x, s,", "self.mean(*args, **kwds) scale = extra_args[0] width = mean * (2*scale*np.log(scale)) / (scale**2 -", "= np.sqrt(expr/(self.count + other.count - 2)) delta = np.abs(self.mean - other.mean)/S return 2*stats.norm.cdf(-0.5*delta)", "_rvs(self, s, frac): \"\"\" random variates \"\"\" # choose the items response characteristics", "np.log(x[idx]))/(2 * log_s) res[x > s] = 1 return (1 - frac) +", "characteristics res = random_log_uniform(1/s, s, self._size) # switch off 
receptors randomly if frac", "either `sum` or `eigen` \"\"\" if method in {'sum', 'eigen'}: self.method = method", "val_max], density=True) def pdf_diff(params): \"\"\" evaluate the estimated pdf \"\"\" scale, sigma =", "probability density function to a histogram obtained by drawing `sim_terms` random numbers \"\"\"", "np.sqrt(width)) def random_log_uniform(v_min, v_max, size): \"\"\" returns random variables that a distributed uniformly", "+ np.log(x[idx]))/(2 * log_s) res[x > s] = 1 return res def _ppf(self,", "return 2*stats.norm.cdf(-0.5*delta) else: # here, we would have to integrate numerically raise NotImplementedError", "is suitable for the given software package. \"\"\" mean2 = mean**2 mu =", ":]) \\ / self.rates[..., :] res[nz] = np.sum(self._terms[..., :] * factor, axis=1) else:", "(inverse of cdf) \"\"\" s, frac = s[0], frac[0] # reset broadcasting q_scale", "= np.zeros_like(x) nz = (x > 0) if np.any(nz): if self.method == 'sum':", "is None: if other.count is None: # neither is sampled S = np.sqrt(0.5*(self.var", "of two normal distributions \"\"\" # calculate the degrees of freedom s1, s2", "np from scipy import stats, special, linalg, optimize from ..data_structures.cache import cached_property def", "can be used to choose a definition of the resulting parameters that is", "OLD DISTRIBUTIONS THAT MIGHT NOT BE NEEDED ANYMORE # ============================================================================== class PartialLogNormDistribution_gen(stats.rv_continuous): \"\"\"", "mean = self.mean[mask] var = self.var[mask] std = self.std[mask] return NORMAL_DISTRIBUTION_NORMALIZATION/std \\ *", "other.var) dist = 0.25*(term1 + term2) elif kind == 'hellinger': dist_b = self.distance(other,", "and performance of the result \"\"\" if var < 0: raise ValueError('Variance must", "of the distribution `dist` \"\"\" mean = dist.mean() var = dist.var() return (dist.moment(3)", "sigma = lognorm_sum_leastsq(count, var_norm) return stats.lognorm(scale=scale * mean, s=sigma) else: raise ValueError('Unknown method", "only supports cases ' 'where all rates are different from each other.') #", "_pdf(self, x, s, frac): \"\"\" probability density function \"\"\" s, frac = s[0],", "functions and classes for probability distributions, which build upon the scipy.stats package and", "alpha in self.alpha) def mean(self): \"\"\" mean of the distribution \"\"\" return self.alpha.sum()", "+ np.diag(self.rates[:-1], 1)) for i in np.flatnonzero(nz): res.flat[i] = \\ 1 - linalg.expm(x.flat[i]*Theta)[0,", "cdf(self, x): \"\"\" cumulative density function \"\"\" if not np.isscalar(x): x = np.asarray(x)", "parameterized by its mean and its variance. 
\"\"\" if variance == 0: return", ":] * factor, axis=1) else: Theta = (np.diag(-self.rates, 0) + np.diag(self.rates[:-1], 1)) for", "kind='kullback-leibler'): \"\"\" return the distance between two normal distributions \"\"\" if kind ==", "# self is sampled S = other.std else: # both are sampled expr", "count sum_var = count * var_norm # get random numbers dist = lognorm_mean_var(1,", "numerically cv2 = var / mean**2 # match square coefficient of variation def", "distribution is non-zero \"\"\" extra_args, _, _, _ = self._parse_args_stats(*args, **kwds) mean =", "need later with np.errstate(divide='ignore'): mat = self.alpha[:, None] \\ / (self.alpha[:, None] -", "size) return np.exp(res) def dist_skewness(dist): \"\"\" returns the skewness of the distribution `dist`", "sum_mean = count sum_var = count * var_norm # get random numbers dist", "methods are ' '[`fenton`, `leastsq`].') def gamma_mean_var(mean, variance): \"\"\" returns a gamma distribution", "used: `fenton` - match the first two moments of the distribution `leastsq` -", "kind='bhattacharyya') dist = np.sqrt(1 - np.exp(-dist_b)) else: raise ValueError('Unknown distance `%s`' % kind)", "denotes how many observations were used to estimate the parameters. All values can", "Additionally, count denotes how many observations were used to estimate the parameters. All", "size=size) for alpha in self.alpha) def mean(self): \"\"\" mean of the distribution \"\"\"", "self.support(*args, **kwds) return frozen def support(self, *args, **kwds): \"\"\" return the interval in", "point function (inverse of cdf) \"\"\" s = s[0] # reset broadcasting res", "1 self._terms = np.prod(mat, 1) def rvs(self, size): \"\"\" random variates \"\"\" #", "moments from fitting var_norm = variance / mean**2 scale, sigma = lognorm_sum_leastsq(count, var_norm)", "import cached_property def lognorm_mean_var_to_mu_sigma(mean, variance, definition='scipy'): \"\"\" determines the parameters of the log-normal", "definition `%s`' % definition) def lognorm_mean(mean, sigma): \"\"\" returns a lognormal distribution parameterized", "their mean and variance. Additionally, count denotes how many observations were used to", "else: scale, sigma = lognorm_mean_var_to_mu_sigma(mean, variance, 'scipy') return stats.lognorm(scale=scale, s=sigma) def lognorm_sum_leastsq(count, var_norm,", "sigma**2) return stats.lognorm(scale=mu, s=sigma) def lognorm_mean_var(mean, variance): \"\"\" returns a lognormal distribution parameterized", "x, s): \"\"\" cumulative probability function \"\"\" s = s[0] # reset broadcasting", "& (x < s) res[idx] = 1/(x[idx] * np.log(s*s)) return res def _cdf(self,", "with mean 1 and variance `var_norm`. These parameters are determined by fitting the", "extra_args, _, _, _ = self._parse_args_stats(*args, **kwds) mean = self.mean(*args, **kwds) scale =", "distribution that always returns a given value Code copied from https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html#making-a-continuous-distribution-i-e-subclassing-rv-continuous \"\"\" def", "obtained by drawing `sim_terms` random numbers \"\"\" sum_mean = count sum_var = count", "pdf_diff(params): \"\"\" evaluate the estimated pdf \"\"\" scale, sigma = params return stats.lognorm.pdf(xs,", "return 0., 0., 0., 0. 
def _rvs(self): return np.zeros(self._size) DeterministicDistribution = DeterministicDistribution_gen( name='DeterministicDistribution'", "s, frac = s[0], frac[0] # reset broadcasting res = np.zeros_like(x) idx =", "res = np.zeros_like(q) idx = (q_scale > 0) res[idx] = s**(2*q_scale[idx] - 1)", "distributions efficiently \"\"\" self.mean = mean self.var = var self.count = count def", "std = self.std[mask] return NORMAL_DISTRIBUTION_NORMALIZATION/std \\ * np.exp(-0.5*(value - mean)**2 / var) def", "0) + np.diag(self.rates[:-1], 1) res = 1 - linalg.expm(x*Theta)[0, :].sum() return res def", "# treat special case separately return DeterministicDistribution(mean) else: # determine width parameter numerically", "mean and variance \"\"\" alpha = mean**2 / variance beta = variance /", "loguniform_mean_var(mean, var): \"\"\" returns a loguniform distribution parameterized by its mean and variance.", "\"\"\" s = s[0] # reset broadcasting res = np.zeros_like(x) idx = (1", ":].sum() return res def cdf(self, x): \"\"\" cumulative density function \"\"\" if not", "optimize from ..data_structures.cache import cached_property def lognorm_mean_var_to_mu_sigma(mean, variance, definition='scipy'): \"\"\" determines the parameters", "x = np.asarray(x) res = np.zeros_like(x) nz = (x > 0) if np.any(nz):", "normal distributions \"\"\" if kind == 'kullback-leibler': dist = 0.5*(np.log(other.var/self.var) + (self.var +", "< x*s) & (x < s) res[idx] = 1/(x[idx] * np.log(s*s)) return res", "= var / mean**2 # match square coefficient of variation def _rhs(q): \"\"\"", "given mean and variance \"\"\" alpha = mean**2 / variance beta = variance", "other.count is None: # self is sampled S = other.std else: # both", "resulting parameters that is suitable for the given software package. 
\"\"\" mean2 =", "\"\"\" return standard deviation \"\"\" return np.sqrt(self.var) def pdf(self, value, mask=None): \"\"\" return", "\"\"\" returns random variables that a distributed uniformly in log space \"\"\" log_min,", "> frac] = 0 return res def _pdf(self, x, s, frac): \"\"\" probability", "* variance) elif method == 'leastsq': # determine the moments from fitting var_norm", "def loguniform_mean_var(mean, var): \"\"\" returns a loguniform distribution parameterized by its mean and", "= np.zeros_like(q) idx = (q_scale > 0) res[idx] = np.exp(s * special.ndtri(q_scale[idx])) return", "= 0 else: if self.method == 'sum': factor = np.exp(-x*self.rates)/self.ratesx res[nz] = np.sum(self._terms", "+ term2) elif kind == 'hellinger': dist_b = self.distance(other, kind='bhattacharyya') dist = np.sqrt(1", "sum_mean + 3 * np.sqrt(sum_var) bins = np.linspace(0, val_max, bins + 1) xs", "- mean) return NormalDistribution(mean, M2/(count - 1), count) def distance(self, other, kind='kullback-leibler'): \"\"\"", "= (self.mean - other.mean)**2/(self.var + other.var) dist = 0.25*(term1 + term2) elif kind", "the log-normal distribution such that the distribution yields a given mean and variance.", "used for calculating the cdf and can be either `sum` or `eigen` \"\"\"", "of variation def _rhs(q): \"\"\" match the coefficient of variation \"\"\" return 0.5", "mask is None: mean = self.mean var = self.var std = self.std else:", "return DeterministicDistribution(mean) else: scale = mean * (2*width*np.log(width)) / (width**2 - 1) return", "/ (width**2 - 1) return LogUniformDistribution(scale=scale, s=width) def loguniform_mean_var(mean, var): \"\"\" returns a", "two normal distributions \"\"\" if kind == 'kullback-leibler': dist = 0.5*(np.log(other.var/self.var) + (self.var", "of the sum of `count` log-normally distributed variables with `mean` and `variance`. The", "\"\"\" return (2 * np.sum(self.alpha**2 * self._terms) - (self.alpha.sum())**2) def pdf(self, x): \"\"\"", "self.__class__(self.mean, self.var, self.count) @cached_property() def std(self): \"\"\" return standard deviation \"\"\" return np.sqrt(self.var)", "1 / self.rates if np.any(rates <= 0): raise ValueError('All rates must be positive')", "fitting var_norm = variance / mean**2 scale, sigma = lognorm_sum_leastsq(count, var_norm) return stats.lognorm(scale=scale", "def _cdf(self, x, s, frac): \"\"\" cumulative probability function \"\"\" s, frac =", "if other.count is None: # neither is sampled S = np.sqrt(0.5*(self.var + other.var))", "**kwds) mean = self.mean(*args, **kwds) scale = extra_args[0] width = mean * (2*scale*np.log(scale))", "the distribution of the sum of `count` log-normally distributed variables with `mean` and", "the hypoexponential distribution. `rates` are the rates of the underlying exponential processes `method`", "var_norm = variance / mean**2 scale, sigma = lognorm_sum_leastsq(count, var_norm) return stats.lognorm(scale=scale *", "the parameters. 
All values can also be numpy arrays to represent many distributions", "self.alpha[None, :]) == 0] = 1 self._terms = np.prod(mat, 1) def rvs(self, size):", "other.mean)**2/(self.var + other.var) dist = 0.25*(term1 + term2) elif kind == 'hellinger': dist_b", "\"\"\" returns a loguniform distribution parameterized by its mean and a spread parameter", "= (s**2 - 1)/(2*s*np.log(s)) var = ((s**4 - 1) * np.log(s) - (s**2", "mean and a spread parameter `sigma` \"\"\" if sigma == 0: return DeterministicDistribution(mean)", "= (1 < x*s) & (x < s) log_s = np.log(s) res[idx] =", "(x < s) log_s = np.log(s) res[idx] = (log_s + np.log(x[idx]))/(2 * log_s)", "a given mean and variance. The optional parameter `definition` can be used to", "\"\"\" return 0.5 * (q + 1) * np.log(q) / (q - 1)", "\"\"\" mean = dist.mean() var = dist.var() return (dist.moment(3) - 3*mean*var - mean**3)", "optimize.newton(_rhs, 1.1) return loguniform_mean(mean, np.sqrt(width)) def random_log_uniform(v_min, v_max, size): \"\"\" returns random variables", "- 1) return res def _stats(self, s): \"\"\" calculates statistics of the distribution", "different from each other.') # calculate terms that we need later with np.errstate(divide='ignore'):", "if sigma == 0: return DeterministicDistribution(mean) else: mu = mean * np.exp(-0.5 *", "variation def _rhs(q): \"\"\" match the coefficient of variation \"\"\" return 0.5 *", "extend it. ''' from __future__ import division import numpy as np from scipy", "return lognorm_mean_var(count * mean, count * variance) elif method == 'leastsq': # determine", "which the PDF of the distribution is non-zero \"\"\" extra_args, _, _, _", "again log-normal with mean and variance determined from the given parameters. Here, several", "freeze(self, *args, **kwds): frozen = super(LogUniformDistribution_gen, self).freeze(*args, **kwds) frozen.support = self.support(*args, **kwds) return", "- 1) return LogUniformDistribution(scale=scale, s=width) def loguniform_mean_var(mean, var): \"\"\" returns a loguniform distribution", "distributions. Accepted methods are ' '[`fenton`, `leastsq`].') def gamma_mean_var(mean, variance): \"\"\" returns a", "============================================================================== class PartialLogNormDistribution_gen(stats.rv_continuous): \"\"\" partial log-normal distribution. a fraction `frac` of the distribution", "def variance(self): \"\"\" variance of the distribution \"\"\" return (2 * np.sum(self.alpha**2 *", "'lognormal distributions. Accepted methods are ' '[`fenton`, `leastsq`].') def gamma_mean_var(mean, variance): \"\"\" returns", "self.std else: mean = self.mean[mask] var = self.var[mask] std = self.std[mask] return NORMAL_DISTRIBUTION_NORMALIZATION/std", "(other.count - 1)*other.var) S = np.sqrt(expr/(self.count + other.count - 2)) delta = np.abs(self.mean" ]
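
The row above shingles a scipy.stats-based distributions module. Its log-normal parameterization is fully visible in the fragments and easy to verify; a minimal sketch follows, reassembled from those fragments. The original also routes `variance == 0` through a `DeterministicDistribution` and accepts other `definition` values, both omitted here, and the closing sanity check is an addition, not part of the source.

import numpy as np
from scipy import stats

def lognorm_mean_var_to_mu_sigma(mean, variance):
    # scipy-style parameters (scale, s) of a log-normal with the given
    # mean and variance, exactly as spelled out in the shingles above
    mean2 = mean**2
    mu = mean2 / np.sqrt(mean2 + variance)         # scipy's `scale`
    sigma = np.sqrt(np.log(1 + variance / mean2))  # scipy's shape `s`
    return mu, sigma

def lognorm_mean_var(mean, variance):
    # frozen log-normal distribution with the requested first two moments
    scale, sigma = lognorm_mean_var_to_mu_sigma(mean, variance)
    return stats.lognorm(scale=scale, s=sigma)

def lognorm_sum_fenton(count, mean, variance):
    # Fenton approximation of a sum of `count` i.i.d. log-normals: match the
    # first two moments of the sum, as the shingled `method='fenton'` branch does
    return lognorm_mean_var(count * mean, count * variance)

# sanity check (an addition): the requested moments round-trip
dist = lognorm_mean_var(mean=2.0, variance=0.5)
assert np.isclose(dist.mean(), 2.0) and np.isclose(dist.var(), 0.5)
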
[ "while posY < linec: if lines[posY][posX % linelen] == '#': trees += 1", "<gh_stars>0 lines = [] with open('input.txt') as f: lines = f.readlines() lines =", "f.readlines() lines = list(map(lambda x: x.strip(), lines)) linelen = len(lines[0]) linec = len(lines)", "f: lines = f.readlines() lines = list(map(lambda x: x.strip(), lines)) linelen = len(lines[0])", "= 0 trees = 0 while posY < linec: if lines[posY][posX % linelen]", "= [] with open('input.txt') as f: lines = f.readlines() lines = list(map(lambda x:", "'#': trees += 1 posX+=x posY+=y return trees print(f'part 1: {check(3, 1)}') slopes", "posY+=y return trees print(f'part 1: {check(3, 1)}') slopes = [(1, 1), (3, 1),", "= f.readlines() lines = list(map(lambda x: x.strip(), lines)) linelen = len(lines[0]) linec =", "1), (1, 2)] result = 1 for i in slopes: result *= check(i[0],", "0 while posY < linec: if lines[posY][posX % linelen] == '#': trees +=", "linec: if lines[posY][posX % linelen] == '#': trees += 1 posX+=x posY+=y return", "trees print(f'part 1: {check(3, 1)}') slopes = [(1, 1), (3, 1), (5, 1),", "1), (3, 1), (5, 1), (7, 1), (1, 2)] result = 1 for", "len(lines[0]) linec = len(lines) def check(x, y): posX = 0 posY = 0", "slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)] result", "(1, 2)] result = 1 for i in slopes: result *= check(i[0], i[1])", "1 posX+=x posY+=y return trees print(f'part 1: {check(3, 1)}') slopes = [(1, 1),", "linelen = len(lines[0]) linec = len(lines) def check(x, y): posX = 0 posY", "with open('input.txt') as f: lines = f.readlines() lines = list(map(lambda x: x.strip(), lines))", "lines = list(map(lambda x: x.strip(), lines)) linelen = len(lines[0]) linec = len(lines) def", "= len(lines) def check(x, y): posX = 0 posY = 0 trees =", "0 posY = 0 trees = 0 while posY < linec: if lines[posY][posX", "trees = 0 while posY < linec: if lines[posY][posX % linelen] == '#':", "< linec: if lines[posY][posX % linelen] == '#': trees += 1 posX+=x posY+=y", "+= 1 posX+=x posY+=y return trees print(f'part 1: {check(3, 1)}') slopes = [(1,", "2)] result = 1 for i in slopes: result *= check(i[0], i[1]) print(f'part", "lines[posY][posX % linelen] == '#': trees += 1 posX+=x posY+=y return trees print(f'part", "trees += 1 posX+=x posY+=y return trees print(f'part 1: {check(3, 1)}') slopes =", "= list(map(lambda x: x.strip(), lines)) linelen = len(lines[0]) linec = len(lines) def check(x,", "linec = len(lines) def check(x, y): posX = 0 posY = 0 trees", "posX = 0 posY = 0 trees = 0 while posY < linec:", "(3, 1), (5, 1), (7, 1), (1, 2)] result = 1 for i", "(7, 1), (1, 2)] result = 1 for i in slopes: result *=", "= 0 while posY < linec: if lines[posY][posX % linelen] == '#': trees", "linelen] == '#': trees += 1 posX+=x posY+=y return trees print(f'part 1: {check(3,", "x.strip(), lines)) linelen = len(lines[0]) linec = len(lines) def check(x, y): posX =", "0 trees = 0 while posY < linec: if lines[posY][posX % linelen] ==", "result = 1 for i in slopes: result *= check(i[0], i[1]) print(f'part 2:", "{check(3, 1)}') slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1,", "== '#': trees += 1 posX+=x posY+=y return trees print(f'part 1: {check(3, 1)}')", "posY < linec: if lines[posY][posX % linelen] == '#': trees += 1 posX+=x", "= len(lines[0]) linec = len(lines) def check(x, y): posX = 0 posY =", "posX+=x posY+=y return trees print(f'part 1: {check(3, 1)}') slopes = [(1, 1), (3,", "open('input.txt') as f: lines = f.readlines() lines = list(map(lambda x: x.strip(), lines)) linelen", "x: x.strip(), lines)) linelen = len(lines[0]) linec = 
len(lines) def check(x, y): posX", "def check(x, y): posX = 0 posY = 0 trees = 0 while", "posY = 0 trees = 0 while posY < linec: if lines[posY][posX %", "check(x, y): posX = 0 posY = 0 trees = 0 while posY", "1), (5, 1), (7, 1), (1, 2)] result = 1 for i in", "lines)) linelen = len(lines[0]) linec = len(lines) def check(x, y): posX = 0", "as f: lines = f.readlines() lines = list(map(lambda x: x.strip(), lines)) linelen =", "1: {check(3, 1)}') slopes = [(1, 1), (3, 1), (5, 1), (7, 1),", "1), (7, 1), (1, 2)] result = 1 for i in slopes: result", "list(map(lambda x: x.strip(), lines)) linelen = len(lines[0]) linec = len(lines) def check(x, y):", "if lines[posY][posX % linelen] == '#': trees += 1 posX+=x posY+=y return trees", "return trees print(f'part 1: {check(3, 1)}') slopes = [(1, 1), (3, 1), (5,", "= 0 posY = 0 trees = 0 while posY < linec: if", "print(f'part 1: {check(3, 1)}') slopes = [(1, 1), (3, 1), (5, 1), (7,", "1)}') slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]", "= [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)] result =", "lines = [] with open('input.txt') as f: lines = f.readlines() lines = list(map(lambda", "% linelen] == '#': trees += 1 posX+=x posY+=y return trees print(f'part 1:", "(5, 1), (7, 1), (1, 2)] result = 1 for i in slopes:", "= 1 for i in slopes: result *= check(i[0], i[1]) print(f'part 2: {result}')", "[(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)] result = 1", "y): posX = 0 posY = 0 trees = 0 while posY <", "len(lines) def check(x, y): posX = 0 posY = 0 trees = 0", "lines = f.readlines() lines = list(map(lambda x: x.strip(), lines)) linelen = len(lines[0]) linec", "[] with open('input.txt') as f: lines = f.readlines() lines = list(map(lambda x: x.strip()," ]
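
Deduplicating the overlapping shingles of the row above yields a complete Advent-of-Code-style tree counter. The reconstruction below is assembled verbatim from those fragments (only whitespace normalized) and runs as-is, assuming an input.txt whose lines mark trees with '#':

lines = []
with open('input.txt') as f:
    lines = f.readlines()
lines = list(map(lambda x: x.strip(), lines))
linelen = len(lines[0])
linec = len(lines)

def check(x, y):
    # walk the grid with slope (x, y), wrapping horizontally via the modulo
    posX = 0
    posY = 0
    trees = 0
    while posY < linec:
        if lines[posY][posX % linelen] == '#':
            trees += 1
        posX += x
        posY += y
    return trees

print(f'part 1: {check(3, 1)}')

slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
result = 1
for i in slopes:
    result *= check(i[0], i[1])
print(f'part 2: {result}')
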
[ "<filename>shoppingcart/shop/context_processors.py from .models import Category def menu_links(request): \"\"\"get menu links\"\"\" links = Category.objects.all()", ".models import Category def menu_links(request): \"\"\"get menu links\"\"\" links = Category.objects.all() return dict(links=links)", "from .models import Category def menu_links(request): \"\"\"get menu links\"\"\" links = Category.objects.all() return" ]
[ "coding: utf-8 -*- \"\"\" @author: nicolas.posocco \"\"\" from .classwise_ece import classwise_ece from .classwise_ece_c", "nicolas.posocco \"\"\" from .classwise_ece import classwise_ece from .classwise_ece_c import classwise_ece_c from .classwise_ece_a import", "\"\"\" from .classwise_ece import classwise_ece from .classwise_ece_c import classwise_ece_c from .classwise_ece_a import classwise_ece_a", "classwise_ece from .classwise_ece_c import classwise_ece_c from .classwise_ece_a import classwise_ece_a from .classwise_ece_ac import classwise_ece_ac", "-*- \"\"\" @author: nicolas.posocco \"\"\" from .classwise_ece import classwise_ece from .classwise_ece_c import classwise_ece_c", "-*- coding: utf-8 -*- \"\"\" @author: nicolas.posocco \"\"\" from .classwise_ece import classwise_ece from", "utf-8 -*- \"\"\" @author: nicolas.posocco \"\"\" from .classwise_ece import classwise_ece from .classwise_ece_c import", "@author: nicolas.posocco \"\"\" from .classwise_ece import classwise_ece from .classwise_ece_c import classwise_ece_c from .classwise_ece_a", "from .classwise_ece import classwise_ece from .classwise_ece_c import classwise_ece_c from .classwise_ece_a import classwise_ece_a from", ".classwise_ece import classwise_ece from .classwise_ece_c import classwise_ece_c from .classwise_ece_a import classwise_ece_a from .classwise_ece_ac", "<reponame>euranova/estimating_eces # -*- coding: utf-8 -*- \"\"\" @author: nicolas.posocco \"\"\" from .classwise_ece import", "# -*- coding: utf-8 -*- \"\"\" @author: nicolas.posocco \"\"\" from .classwise_ece import classwise_ece", "import classwise_ece from .classwise_ece_c import classwise_ece_c from .classwise_ece_a import classwise_ece_a from .classwise_ece_ac import", "\"\"\" @author: nicolas.posocco \"\"\" from .classwise_ece import classwise_ece from .classwise_ece_c import classwise_ece_c from" ]
[ "self.placeholders['labels'], self.placeholders['labels_mask']) self.loss += self.pred_error def _accuracy(self): self.accuracy = masked_accuracy(self.outputs, self.placeholders['labels'], self.placeholders['labels_mask']) def", "output_dim=self.configs['hidden_dims'], num_heads = self.num_heads, average_heads=False, act=tf.nn.elu, placeholders=self.placeholders, model_dropout=True, attention_dropout=True, bias=True, attention_bias=True, parent_model=self)) self.layers.append(GraphAttention(input_dim=self.configs['hidden_dims']*self.num_heads,", "AttributeError(\"TensorFlow session not provided.\") saver = tf.train.Saver(self.vars) save_path = \"tmp/%s.ckpt\" % self.name saver.restore(sess,", "self.layers: for var in layer.vars.values(): if 'weight' in var.name: self.loss += self.configs['weight_decay'] *", "IDs.\"\"\" if layer_name not in self._LAYER_UIDS: self._LAYER_UIDS[layer_name] = 1 return 1 else: self._LAYER_UIDS[layer_name]", "save_path) class GAT(Model): def __init__(self, configs, placeholders, input_dim, **kwargs): super(GAT, self).__init__(**kwargs) self.configs =", "average_heads=False, act=tf.nn.elu, placeholders=self.placeholders, model_dropout=True, attention_dropout=True, bias=True, attention_bias=True, parent_model=self)) self.layers.append(GraphAttention(input_dim=self.configs['hidden_dims']*self.num_heads, output_dim=self.output_dim, num_heads=1, average_heads=True, act=lambda", "self.optimizer = None self.opt_op = None def _build(self): raise NotImplementedError def build(self): \"\"\"", "self._accuracy() # Joint Optimzer for Attention and Model Parameters self.opt_op = self.optimizer.minimize(self.loss) #####################################################", "= {var.name: var for var in variables} # Build metrics self._loss() self._accuracy() #", "self.name saver.restore(sess, save_path) print(\"Model restored from file: %s\" % save_path) class GAT(Model): def", "0 self.optimizer = None self.opt_op = None def _build(self): raise NotImplementedError def build(self):", "session not provided.\") saver = tf.train.Saver(self.vars) save_path = saver.save(sess, \"tmp/%s.ckpt\" % self.name) print(\"Model", "1 else: self._LAYER_UIDS[layer_name] += 1 return self._LAYER_UIDS[layer_name] def predict(self): pass def _loss(self): raise", "if not name: name = self.__class__.__name__.lower() self.name = name self._LAYER_UIDS = {} self.vars", "**kwargs): name = kwargs.get('name') if not name: name = self.__class__.__name__.lower() self.name = name", "Uncomment these optimizers for Alternate Optimization # of Attention and Model Parameters #####################################################", "placeholders, input_dim, **kwargs): super(GAT, self).__init__(**kwargs) self.configs = configs self.inputs = placeholders['features'] self.input_dim =", "# Build sequential layer model self.activations.append(self.inputs) for layer in self.layers: hidden = layer(self.activations[-1])", "= {} self.vars = {} self.placeholders = {} self.layers = [] self.activations =", "layer(self.activations[-1]) self.activations.append(hidden) self.outputs = self.activations[-1] # Store model variables for easy access variables", "in file: %s\" % save_path) def load(self, sess=None): if not sess: raise AttributeError(\"TensorFlow", "self.configs = configs self.inputs = placeholders['features'] self.input_dim = input_dim self.output_dim = placeholders['labels'].get_shape().as_list()[1] self.placeholders", "configs['num_heads'] self.build() def _loss(self): # Weight decay loss for layer in self.layers: for", "= 
self.__class__.__name__.lower() self.name = name self._LAYER_UIDS = {} self.vars = {} self.placeholders =", "raise NotImplementedError def build(self): \"\"\" Wrapper for _build() \"\"\" with tf.variable_scope(self.name): self._build() #", "placeholders['features'] self.input_dim = input_dim self.output_dim = placeholders['labels'].get_shape().as_list()[1] self.placeholders = placeholders self.seed = configs['seed']", "for layer in self.layers: hidden = layer(self.activations[-1]) self.activations.append(hidden) self.outputs = self.activations[-1] # Store", "print(\"Model saved in file: %s\" % save_path) def load(self, sess=None): if not sess:", "None def _build(self): raise NotImplementedError def build(self): \"\"\" Wrapper for _build() \"\"\" with", "get_layer_uid(self,layer_name=''): \"\"\"Helper function, assigns unique layer IDs.\"\"\" if layer_name not in self._LAYER_UIDS: self._LAYER_UIDS[layer_name]", "metrics import * class Model(object): def __init__(self, **kwargs): name = kwargs.get('name') if not", "scope=self.name) self.model_op = self.optimizer.minimize(self.loss, var_list=model_variables) ''' ##################################################### def get_layer_uid(self,layer_name=''): \"\"\"Helper function, assigns unique", "Build metrics self._loss() self._accuracy() # Joint Optimzer for Attention and Model Parameters self.opt_op", "# Uncomment these optimizers for Alternate Optimization # of Attention and Model Parameters", "if not sess: raise AttributeError(\"TensorFlow session not provided.\") saver = tf.train.Saver(self.vars) save_path =", "Joint Optimzer for Attention and Model Parameters self.opt_op = self.optimizer.minimize(self.loss) ##################################################### # Uncomment", "self.placeholders['labels_mask']) self.loss += self.pred_error def _accuracy(self): self.accuracy = masked_accuracy(self.outputs, self.placeholders['labels'], self.placeholders['labels_mask']) def _build(self):", "def __init__(self, **kwargs): name = kwargs.get('name') if not name: name = self.__class__.__name__.lower() self.name", "self.layers: hidden = layer(self.activations[-1]) self.activations.append(hidden) self.outputs = self.activations[-1] # Store model variables for", "tf.train.Saver(self.vars) save_path = \"tmp/%s.ckpt\" % self.name saver.restore(sess, save_path) print(\"Model restored from file: %s\"", "Build sequential layer model self.activations.append(self.inputs) for layer in self.layers: hidden = layer(self.activations[-1]) self.activations.append(hidden)", "Model Parameters self.opt_op = self.optimizer.minimize(self.loss) ##################################################### # Uncomment these optimizers for Alternate Optimization", "for var in layer.vars.values(): if 'weight' in var.name: self.loss += self.configs['weight_decay'] * tf.nn.l2_loss(var)", "self.activations.append(hidden) self.outputs = self.activations[-1] # Store model variables for easy access variables =", "model self.activations.append(self.inputs) for layer in self.layers: hidden = layer(self.activations[-1]) self.activations.append(hidden) self.outputs = self.activations[-1]", "def get_layer_uid(self,layer_name=''): \"\"\"Helper function, assigns unique layer IDs.\"\"\" if layer_name not in self._LAYER_UIDS:", "saver.save(sess, \"tmp/%s.ckpt\" % self.name) print(\"Model saved in file: %s\" % save_path) def load(self,", "class GAT(Model): def __init__(self, configs, placeholders, input_dim, **kwargs): super(GAT, self).__init__(**kwargs) self.configs = configs", "self.placeholders 
= placeholders self.seed = configs['seed'] self.optimizer = tf.train.AdamOptimizer(learning_rate=configs['learning_rate']) self.num_heads = configs['num_heads'] self.build()", "= None self.opt_op = None def _build(self): raise NotImplementedError def build(self): \"\"\" Wrapper", "self.opt_op = self.optimizer.minimize(self.loss) ##################################################### # Uncomment these optimizers for Alternate Optimization # of", "= {} self.layers = [] self.activations = [] self.inputs = None self.outputs =", "self._LAYER_UIDS = {} self.vars = {} self.placeholders = {} self.layers = [] self.activations", "def load(self, sess=None): if not sess: raise AttributeError(\"TensorFlow session not provided.\") saver =", "for _build() \"\"\" with tf.variable_scope(self.name): self._build() # Build sequential layer model self.activations.append(self.inputs) for", "self.loss += self.configs['weight_decay'] * tf.nn.l2_loss(var) # Cross entropy error self.pred_error = masked_softmax_cross_entropy(self.outputs, self.placeholders['labels'],", "= self.optimizer.minimize(self.loss) ##################################################### # Uncomment these optimizers for Alternate Optimization # of Attention", "name: name = self.__class__.__name__.lower() self.name = name self._LAYER_UIDS = {} self.vars = {}", "self.inputs = placeholders['features'] self.input_dim = input_dim self.output_dim = placeholders['labels'].get_shape().as_list()[1] self.placeholders = placeholders self.seed", "_build(self): raise NotImplementedError def build(self): \"\"\" Wrapper for _build() \"\"\" with tf.variable_scope(self.name): self._build()", "and Model Parameters self.opt_op = self.optimizer.minimize(self.loss) ##################################################### # Uncomment these optimizers for Alternate", "variables} # Build metrics self._loss() self._accuracy() # Joint Optimzer for Attention and Model", "save_path = saver.save(sess, \"tmp/%s.ckpt\" % self.name) print(\"Model saved in file: %s\" % save_path)", "# Cross entropy error self.pred_error = masked_softmax_cross_entropy(self.outputs, self.placeholders['labels'], self.placeholders['labels_mask']) self.loss += self.pred_error def", "kwargs.get('name') if not name: name = self.__class__.__name__.lower() self.name = name self._LAYER_UIDS = {}", "model_dropout=True, attention_dropout=True, bias=True, attention_bias=True, parent_model=self)) self.layers.append(GraphAttention(input_dim=self.configs['hidden_dims']*self.num_heads, output_dim=self.output_dim, num_heads=1, average_heads=True, act=lambda x: x, placeholders=self.placeholders,", "for var in variables} # Build metrics self._loss() self._accuracy() # Joint Optimzer for", "save_path) print(\"Model restored from file: %s\" % save_path) class GAT(Model): def __init__(self, configs,", "''' ##################################################### def get_layer_uid(self,layer_name=''): \"\"\"Helper function, assigns unique layer IDs.\"\"\" if layer_name not", "if layer_name not in self._LAYER_UIDS: self._LAYER_UIDS[layer_name] = 1 return 1 else: self._LAYER_UIDS[layer_name] +=", "self.vars = {} self.placeholders = {} self.layers = [] self.activations = [] self.inputs", "import * from metrics import * class Model(object): def __init__(self, **kwargs): name =", "sess: raise AttributeError(\"TensorFlow session not provided.\") saver = tf.train.Saver(self.vars) save_path = \"tmp/%s.ckpt\" %", "else: self._LAYER_UIDS[layer_name] += 1 return self._LAYER_UIDS[layer_name] def predict(self): pass def _loss(self): 
raise NotImplementedError", "save_path = \"tmp/%s.ckpt\" % self.name saver.restore(sess, save_path) print(\"Model restored from file: %s\" %", "self.opt_op = None def _build(self): raise NotImplementedError def build(self): \"\"\" Wrapper for _build()", "layer.vars.values(): if 'weight' in var.name: self.loss += self.configs['weight_decay'] * tf.nn.l2_loss(var) # Cross entropy", "save(self, sess=None): if not sess: raise AttributeError(\"TensorFlow session not provided.\") saver = tf.train.Saver(self.vars)", "self.layers.append(GraphAttention(input_dim=self.input_dim, output_dim=self.configs['hidden_dims'], num_heads = self.num_heads, average_heads=False, act=tf.nn.elu, placeholders=self.placeholders, model_dropout=True, attention_dropout=True, bias=True, attention_bias=True, parent_model=self))", "in var.name: self.loss += self.configs['weight_decay'] * tf.nn.l2_loss(var) # Cross entropy error self.pred_error =", "[] self.activations = [] self.inputs = None self.outputs = None self.loss = 0", "= saver.save(sess, \"tmp/%s.ckpt\" % self.name) print(\"Model saved in file: %s\" % save_path) def", "var_list=model_variables) ''' ##################################################### def get_layer_uid(self,layer_name=''): \"\"\"Helper function, assigns unique layer IDs.\"\"\" if layer_name", "Weight decay loss for layer in self.layers: for var in layer.vars.values(): if 'weight'", "= masked_accuracy(self.outputs, self.placeholders['labels'], self.placeholders['labels_mask']) def _build(self): self.layers.append(GraphAttention(input_dim=self.input_dim, output_dim=self.configs['hidden_dims'], num_heads = self.num_heads, average_heads=False, act=tf.nn.elu,", "Attention and Model Parameters ##################################################### ''' attention_variables = tf.get_collection('ATTENTION_WEIGHTS', scope=self.name) self.attention_op = self.optimizer.minimize(self.loss,", "class Model(object): def __init__(self, **kwargs): name = kwargs.get('name') if not name: name =", "def _loss(self): # Weight decay loss for layer in self.layers: for var in", "metrics self._loss() self._accuracy() # Joint Optimzer for Attention and Model Parameters self.opt_op =", "self.activations[-1] # Store model variables for easy access variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name) self.vars", "act=tf.nn.elu, placeholders=self.placeholders, model_dropout=True, attention_dropout=True, bias=True, attention_bias=True, parent_model=self)) self.layers.append(GraphAttention(input_dim=self.configs['hidden_dims']*self.num_heads, output_dim=self.output_dim, num_heads=1, average_heads=True, act=lambda x:", "= placeholders['features'] self.input_dim = input_dim self.output_dim = placeholders['labels'].get_shape().as_list()[1] self.placeholders = placeholders self.seed =", "file: %s\" % save_path) def load(self, sess=None): if not sess: raise AttributeError(\"TensorFlow session", "''' attention_variables = tf.get_collection('ATTENTION_WEIGHTS', scope=self.name) self.attention_op = self.optimizer.minimize(self.loss, var_list=attention_variables) model_variables = tf.get_collection('MODEL_WEIGHTS', scope=self.name)", "tf.train.Saver(self.vars) save_path = saver.save(sess, \"tmp/%s.ckpt\" % self.name) print(\"Model saved in file: %s\" %", "predict(self): pass def _loss(self): raise NotImplementedError def _accuracy(self): raise NotImplementedError def save(self, sess=None):", "{var.name: var for var in variables} # Build metrics self._loss() self._accuracy() # Joint", "= None self.outputs = None 
from layers import *
from metrics import *

import tensorflow as tf  # explicit import; the code below uses the TF 1.x graph API


class Model(object):
    def __init__(self, **kwargs):
        name = kwargs.get('name')
        if not name:
            name = self.__class__.__name__.lower()
        self.name = name
        self._LAYER_UIDS = {}

        self.vars = {}
        self.placeholders = {}

        self.layers = []
        self.activations = []

        self.inputs = None
        self.outputs = None

        self.loss = 0
        self.accuracy = 0
        self.optimizer = None
        self.opt_op = None

    def _build(self):
        raise NotImplementedError

    def build(self):
        """ Wrapper for _build() """
        with tf.variable_scope(self.name):
            self._build()

        # Build sequential layer model
        self.activations.append(self.inputs)
        for layer in self.layers:
            hidden = layer(self.activations[-1])
            self.activations.append(hidden)
        self.outputs = self.activations[-1]

        # Store model variables for easy access
        variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
        self.vars = {var.name: var for var in variables}

        # Build metrics
        self._loss()
        self._accuracy()

        # Joint Optimizer for Attention and Model Parameters
        self.opt_op = self.optimizer.minimize(self.loss)

        #####################################################
        # Uncomment these optimizers for Alternate Optimization
        # of Attention and Model Parameters
        #####################################################
        '''
        attention_variables = tf.get_collection('ATTENTION_WEIGHTS', scope=self.name)
        self.attention_op = self.optimizer.minimize(self.loss, var_list=attention_variables)
        model_variables = tf.get_collection('MODEL_WEIGHTS', scope=self.name)
        self.model_op = self.optimizer.minimize(self.loss, var_list=model_variables)
        '''
        #####################################################

    def get_layer_uid(self, layer_name=''):
        """Helper function, assigns unique layer IDs."""
        if layer_name not in self._LAYER_UIDS:
            self._LAYER_UIDS[layer_name] = 1
            return 1
        else:
            self._LAYER_UIDS[layer_name] += 1
            return self._LAYER_UIDS[layer_name]

    def predict(self):
        pass

    def _loss(self):
        raise NotImplementedError

    def _accuracy(self):
        raise NotImplementedError

    def save(self, sess=None):
        if not sess:
            raise AttributeError("TensorFlow session not provided.")
        saver = tf.train.Saver(self.vars)
        save_path = saver.save(sess, "tmp/%s.ckpt" % self.name)
        print("Model saved in file: %s" % save_path)

    def load(self, sess=None):
        if not sess:
            raise AttributeError("TensorFlow session not provided.")
        saver = tf.train.Saver(self.vars)
        save_path = "tmp/%s.ckpt" % self.name
        saver.restore(sess, save_path)
        print("Model restored from file: %s" % save_path)


class GAT(Model):
    def __init__(self, configs, placeholders, input_dim, **kwargs):
        super(GAT, self).__init__(**kwargs)

        self.configs = configs
        self.inputs = placeholders['features']
        self.input_dim = input_dim
        self.output_dim = placeholders['labels'].get_shape().as_list()[1]
        self.placeholders = placeholders
        self.seed = configs['seed']
        self.optimizer = tf.train.AdamOptimizer(learning_rate=configs['learning_rate'])
        self.num_heads = configs['num_heads']

        self.build()

    def _loss(self):
        # Weight decay loss
        for layer in self.layers:
            for var in layer.vars.values():
                if 'weight' in var.name:
                    self.loss += self.configs['weight_decay'] * tf.nn.l2_loss(var)

        # Cross entropy error
        self.pred_error = masked_softmax_cross_entropy(
            self.outputs, self.placeholders['labels'], self.placeholders['labels_mask'])
        self.loss += self.pred_error

    def _accuracy(self):
        self.accuracy = masked_accuracy(
            self.outputs, self.placeholders['labels'], self.placeholders['labels_mask'])

    def _build(self):
        # First layer: multi-head attention, head outputs concatenated
        self.layers.append(GraphAttention(input_dim=self.input_dim,
                                          output_dim=self.configs['hidden_dims'],
                                          num_heads=self.num_heads,
                                          average_heads=False,
                                          act=tf.nn.elu,
                                          placeholders=self.placeholders,
                                          model_dropout=True,
                                          attention_dropout=True,
                                          bias=True,
                                          attention_bias=True,
                                          parent_model=self))

        # Output layer: single averaged head producing class logits
        self.layers.append(GraphAttention(input_dim=self.configs['hidden_dims'] * self.num_heads,
                                          output_dim=self.output_dim,
                                          num_heads=1,
                                          average_heads=True,
                                          act=lambda x: x,
                                          placeholders=self.placeholders,
                                          model_dropout=True,
                                          attention_dropout=True,
                                          bias=True,
                                          attention_bias=True,
                                          parent_model=self))

    def predict(self):
        return tf.nn.softmax(self.outputs)
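# ---------------------------------------------------------------------------
# Note (not part of the original file): metrics.py is star-imported above but
# does not appear in this section. The sketch below is a plausible definition
# of the two masked metrics used by GAT._loss() / GAT._accuracy(), following
# the widely used GCN reference implementation; treat it as an assumption
# about what metrics.py provides, not the repository's actual code.
#
# import tensorflow as tf
#
# def masked_softmax_cross_entropy(preds, labels, mask):
#     """Softmax cross-entropy loss, averaged over the masked nodes only."""
#     loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
#     mask = tf.cast(mask, dtype=tf.float32)
#     mask /= tf.reduce_mean(mask)  # rescale so the mean over all nodes is unbiased
#     return tf.reduce_mean(loss * mask)
#
# def masked_accuracy(preds, labels, mask):
#     """Accuracy over the masked nodes only."""
#     correct = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1))
#     accuracy_all = tf.cast(correct, tf.float32)
#     mask = tf.cast(mask, dtype=tf.float32)
#     mask /= tf.reduce_mean(mask)
#     return tf.reduce_mean(accuracy_all * mask)
# ---------------------------------------------------------------------------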
[ "argparse import shutil import base64 import glob import sys import os import promote", "= verbose self.devnull = open(os.devnull, \"w\") self.image = image parser = configparser.ConfigParser() parser.read(\"authentication.ini\")", "= glob.glob(\"./*.pyc\") for aFile in files_to_remove: os.remove(aFile) shutil.rmtree(\"__pycache__\") def run(self): self.docker_login_pub() self.build() self.push()", "= (\"docker login \" + \"--username {} \".format(self.dockerhub_username) + \"--password {}\".format(self.dockerhub_password)) subprocess.call(cmd, shell=True)", "sys.stdout.write(\"Done\") sys.stdout.flush() def clean(self): files_to_remove = glob.glob(\"./*.pyc\") for aFile in files_to_remove: os.remove(aFile) shutil.rmtree(\"__pycache__\")", "-t {} ../\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True)", "\" + \"--username {} \".format(self.dockerhub_priv_username) + \"--password {}\".format(self.dockerhub_priv_password)) subprocess.call(cmd, shell=True) def build(self): sys.stdout.write(\"Building", "Manage(object): def __init__(self, image, verbose=False): self.init_feedback() self.verbose = verbose self.devnull = open(os.devnull, \"w\")", "self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def promote(self):", "Container... \") sys.stdout.flush() cmd = (\"docker build -t {} ../\" .format(self.image)) if self.verbose:", "sys.stdout.flush() cmd = (\"docker build -t {} ../\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True)", "self.dockerhub_priv_username = base64.b64decode( parser.get(\"dockerhub_priv\", \"username\")).decode(\"utf-8\") self.dockerhub_priv_password = <PASSWORD>.b64decode( parser.get(\"dockerhub_priv\", \"password\")).decode(\"utf-8\") def init_feedback(self): print(\"#########################\")", "\" + \"--username {} \".format(self.dockerhub_username) + \"--password {}\".format(self.dockerhub_password)) subprocess.call(cmd, shell=True) def docker_login_priv(self): cmd", "__init__(self, image, verbose=False): self.init_feedback() self.verbose = verbose self.devnull = open(os.devnull, \"w\") self.image =", "subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def push(self): sys.stdout.write(\"Pushing Container... 
\") sys.stdout.flush()", "def init_feedback(self): print(\"#########################\") print(\"# Void Build Pipeline #\") print(\"#########################\") def docker_login_pub(self): cmd =", "def clean(self): files_to_remove = glob.glob(\"./*.pyc\") for aFile in files_to_remove: os.remove(aFile) shutil.rmtree(\"__pycache__\") def run(self):", "{} ../\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\")", "image parser = configparser.ConfigParser() parser.read(\"authentication.ini\") self.dockerhub_username = base64.b64decode( parser.get(\"dockerhub\", \"username\")).decode(\"utf-8\") self.dockerhub_password = <PASSWORD>4decode(", "= Manage(args.image, args.verbose) else: manage = Manage(args.image) manage.run() if __name__ == \"__main__\": main()", "subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def push(self): sys.stdout.write(\"Pushing", "output (default False)\") args = parser.parse_args() if args.verbose: manage = Manage(args.image, args.verbose) else:", "cmd = (\"docker login \" + \"--username {} \".format(self.dockerhub_username) + \"--password {}\".format(self.dockerhub_password)) subprocess.call(cmd,", "def docker_login_priv(self): cmd = (\"docker login dockerhub.paypalcorp.com:443 \" + \"--username {} \".format(self.dockerhub_priv_username) +", "= (\"docker login dockerhub.paypalcorp.com:443 \" + \"--username {} \".format(self.dockerhub_priv_username) + \"--password {}\".format(self.dockerhub_priv_password)) subprocess.call(cmd,", "import subprocess import argparse import shutil import base64 import glob import sys import", "subprocess import argparse import shutil import base64 import glob import sys import os", "Pipeline #\") print(\"#########################\") def docker_login_pub(self): cmd = (\"docker login \" + \"--username {}", "\"--password {}\".format(self.dockerhub_password)) subprocess.call(cmd, shell=True) def docker_login_priv(self): cmd = (\"docker login dockerhub.paypalcorp.com:443 \" +", "for aFile in files_to_remove: os.remove(aFile) shutil.rmtree(\"__pycache__\") def run(self): self.docker_login_pub() self.build() self.push() self.promote() self.clean()", "print(\"\") def push(self): sys.stdout.write(\"Pushing Container... 
\") sys.stdout.flush() cmd = (\"docker push {}\" .format(self.image))", "self.clean() self.docker_login_priv() def main(): parser = argparse.ArgumentParser() required = parser.add_argument_group('Required arguments') required.add_argument(\"-i\", \"--image\",", "action=\"store_true\", help=\"Show output (default False)\") args = parser.parse_args() if args.verbose: manage = Manage(args.image,", "\"username\")).decode(\"utf-8\") self.dockerhub_priv_password = <PASSWORD>.b64decode( parser.get(\"dockerhub_priv\", \"password\")).decode(\"utf-8\") def init_feedback(self): print(\"#########################\") print(\"# Void Build Pipeline", "self.image = image parser = configparser.ConfigParser() parser.read(\"authentication.ini\") self.dockerhub_username = base64.b64decode( parser.get(\"dockerhub\", \"username\")).decode(\"utf-8\") self.dockerhub_password", "print(\"# Void Build Pipeline #\") print(\"#########################\") def docker_login_pub(self): cmd = (\"docker login \"", "import argparse import shutil import base64 import glob import sys import os import", "\".format(self.dockerhub_priv_username) + \"--password {}\".format(self.dockerhub_priv_password)) subprocess.call(cmd, shell=True) def build(self): sys.stdout.write(\"Building Container... \") sys.stdout.flush() cmd", "Build Pipeline #\") print(\"#########################\") def docker_login_pub(self): cmd = (\"docker login \" + \"--username", "= <PASSWORD>4decode( parser.get(\"dockerhub\", \"password\")).decode(\"utf-8\") self.dockerhub_priv_username = base64.b64decode( parser.get(\"dockerhub_priv\", \"username\")).decode(\"utf-8\") self.dockerhub_priv_password = <PASSWORD>.b64decode( parser.get(\"dockerhub_priv\",", "def build(self): sys.stdout.write(\"Building Container... \") sys.stdout.flush() cmd = (\"docker build -t {} ../\"", "parser.add_argument_group('Required arguments') required.add_argument(\"-i\", \"--image\", action=\"store\", help=\"Docker image and tag\", required=True) required.add_argument(\"-v\", \"--verbose\", action=\"store_true\",", "parser.get(\"dockerhub\", \"password\")).decode(\"utf-8\") self.dockerhub_priv_username = base64.b64decode( parser.get(\"dockerhub_priv\", \"username\")).decode(\"utf-8\") self.dockerhub_priv_password = <PASSWORD>.b64decode( parser.get(\"dockerhub_priv\", \"password\")).decode(\"utf-8\") def", "argparse.ArgumentParser() required = parser.add_argument_group('Required arguments') required.add_argument(\"-i\", \"--image\", action=\"store\", help=\"Docker image and tag\", required=True)", "stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def promote(self): sys.stdout.write(\"Promoting Container... \") sys.stdout.flush() prom", "required=True) required.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Show output (default False)\") args = parser.parse_args() if args.verbose:", "push(self): sys.stdout.write(\"Pushing Container... 
\") sys.stdout.flush() cmd = (\"docker push {}\" .format(self.image)) if self.verbose:", "verbose=False): self.init_feedback() self.verbose = verbose self.devnull = open(os.devnull, \"w\") self.image = image parser", "base64.b64decode( parser.get(\"dockerhub_priv\", \"username\")).decode(\"utf-8\") self.dockerhub_priv_password = <PASSWORD>.b64decode( parser.get(\"dockerhub_priv\", \"password\")).decode(\"utf-8\") def init_feedback(self): print(\"#########################\") print(\"# Void", "import base64 import glob import sys import os import promote class Manage(object): def", "def main(): parser = argparse.ArgumentParser() required = parser.add_argument_group('Required arguments') required.add_argument(\"-i\", \"--image\", action=\"store\", help=\"Docker", "shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def promote(self): sys.stdout.write(\"Promoting Container... \") sys.stdout.flush() prom = promote.Promote(self.image)", "(\"docker build -t {} ../\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull,", "self.devnull = open(os.devnull, \"w\") self.image = image parser = configparser.ConfigParser() parser.read(\"authentication.ini\") self.dockerhub_username =", "Container... \") sys.stdout.flush() cmd = (\"docker push {}\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True)", "= base64.b64decode( parser.get(\"dockerhub\", \"username\")).decode(\"utf-8\") self.dockerhub_password = <PASSWORD>4decode( parser.get(\"dockerhub\", \"password\")).decode(\"utf-8\") self.dockerhub_priv_username = base64.b64decode( parser.get(\"dockerhub_priv\",", "= parser.parse_args() if args.verbose: manage = Manage(args.image, args.verbose) else: manage = Manage(args.image) manage.run()", "shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def push(self): sys.stdout.write(\"Pushing Container... \") sys.stdout.flush() cmd = (\"docker", "\") sys.stdout.flush() cmd = (\"docker build -t {} ../\" .format(self.image)) if self.verbose: subprocess.call(cmd,", "login \" + \"--username {} \".format(self.dockerhub_username) + \"--password {}\".format(self.dockerhub_password)) subprocess.call(cmd, shell=True) def docker_login_priv(self):", "def push(self): sys.stdout.write(\"Pushing Container... 
\") sys.stdout.flush() cmd = (\"docker push {}\" .format(self.image)) if", "{}\".format(self.dockerhub_password)) subprocess.call(cmd, shell=True) def docker_login_priv(self): cmd = (\"docker login dockerhub.paypalcorp.com:443 \" + \"--username", "parser.read(\"authentication.ini\") self.dockerhub_username = base64.b64decode( parser.get(\"dockerhub\", \"username\")).decode(\"utf-8\") self.dockerhub_password = <PASSWORD>4decode( parser.get(\"dockerhub\", \"password\")).decode(\"utf-8\") self.dockerhub_priv_username =", "self.push() self.promote() self.clean() self.docker_login_priv() def main(): parser = argparse.ArgumentParser() required = parser.add_argument_group('Required arguments')", "image and tag\", required=True) required.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Show output (default False)\") args =", "def __init__(self, image, verbose=False): self.init_feedback() self.verbose = verbose self.devnull = open(os.devnull, \"w\") self.image", "parser.get(\"dockerhub_priv\", \"password\")).decode(\"utf-8\") def init_feedback(self): print(\"#########################\") print(\"# Void Build Pipeline #\") print(\"#########################\") def docker_login_pub(self):", "args.verbose: manage = Manage(args.image, args.verbose) else: manage = Manage(args.image) manage.run() if __name__ ==", "subprocess.call(cmd, shell=True) def build(self): sys.stdout.write(\"Building Container... \") sys.stdout.flush() cmd = (\"docker build -t", "sys.stdout.flush() print(\"\") def promote(self): sys.stdout.write(\"Promoting Container... \") sys.stdout.flush() prom = promote.Promote(self.image) prom.run() sys.stdout.write(\"Done\")", "= image parser = configparser.ConfigParser() parser.read(\"authentication.ini\") self.dockerhub_username = base64.b64decode( parser.get(\"dockerhub\", \"username\")).decode(\"utf-8\") self.dockerhub_password =", "files_to_remove: os.remove(aFile) shutil.rmtree(\"__pycache__\") def run(self): self.docker_login_pub() self.build() self.push() self.promote() self.clean() self.docker_login_priv() def main():", "def docker_login_pub(self): cmd = (\"docker login \" + \"--username {} \".format(self.dockerhub_username) + \"--password", "\"password\")).decode(\"utf-8\") def init_feedback(self): print(\"#########################\") print(\"# Void Build Pipeline #\") print(\"#########################\") def docker_login_pub(self): cmd", "clean(self): files_to_remove = glob.glob(\"./*.pyc\") for aFile in files_to_remove: os.remove(aFile) shutil.rmtree(\"__pycache__\") def run(self): self.docker_login_pub()", "sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def promote(self): sys.stdout.write(\"Promoting Container... 
\") sys.stdout.flush() prom = promote.Promote(self.image) prom.run()", "(\"docker login dockerhub.paypalcorp.com:443 \" + \"--username {} \".format(self.dockerhub_priv_username) + \"--password {}\".format(self.dockerhub_priv_password)) subprocess.call(cmd, shell=True)", "help=\"Docker image and tag\", required=True) required.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Show output (default False)\") args", "Void Build Pipeline #\") print(\"#########################\") def docker_login_pub(self): cmd = (\"docker login \" +", "print(\"#########################\") def docker_login_pub(self): cmd = (\"docker login \" + \"--username {} \".format(self.dockerhub_username) +", "required = parser.add_argument_group('Required arguments') required.add_argument(\"-i\", \"--image\", action=\"store\", help=\"Docker image and tag\", required=True) required.add_argument(\"-v\",", "os import promote class Manage(object): def __init__(self, image, verbose=False): self.init_feedback() self.verbose = verbose", "stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def promote(self): sys.stdout.write(\"Promoting Container... \") sys.stdout.flush() prom =", "shutil.rmtree(\"__pycache__\") def run(self): self.docker_login_pub() self.build() self.push() self.promote() self.clean() self.docker_login_priv() def main(): parser =", "{} \".format(self.dockerhub_username) + \"--password {}\".format(self.dockerhub_password)) subprocess.call(cmd, shell=True) def docker_login_priv(self): cmd = (\"docker login", "\"--verbose\", action=\"store_true\", help=\"Show output (default False)\") args = parser.parse_args() if args.verbose: manage =", "<PASSWORD>.b64decode( parser.get(\"dockerhub_priv\", \"password\")).decode(\"utf-8\") def init_feedback(self): print(\"#########################\") print(\"# Void Build Pipeline #\") print(\"#########################\") def", "\"password\")).decode(\"utf-8\") self.dockerhub_priv_username = base64.b64decode( parser.get(\"dockerhub_priv\", \"username\")).decode(\"utf-8\") self.dockerhub_priv_password = <PASSWORD>.b64decode( parser.get(\"dockerhub_priv\", \"password\")).decode(\"utf-8\") def init_feedback(self):", "{} \".format(self.dockerhub_priv_username) + \"--password {}\".format(self.dockerhub_priv_password)) subprocess.call(cmd, shell=True) def build(self): sys.stdout.write(\"Building Container... \") sys.stdout.flush()", "class Manage(object): def __init__(self, image, verbose=False): self.init_feedback() self.verbose = verbose self.devnull = open(os.devnull,", "files_to_remove = glob.glob(\"./*.pyc\") for aFile in files_to_remove: os.remove(aFile) shutil.rmtree(\"__pycache__\") def run(self): self.docker_login_pub() self.build()", "print(\"#########################\") print(\"# Void Build Pipeline #\") print(\"#########################\") def docker_login_pub(self): cmd = (\"docker login", "shell=True) def build(self): sys.stdout.write(\"Building Container... 
\") sys.stdout.flush() cmd = (\"docker build -t {}", "(default False)\") args = parser.parse_args() if args.verbose: manage = Manage(args.image, args.verbose) else: manage", "self.build() self.push() self.promote() self.clean() self.docker_login_priv() def main(): parser = argparse.ArgumentParser() required = parser.add_argument_group('Required", "main(): parser = argparse.ArgumentParser() required = parser.add_argument_group('Required arguments') required.add_argument(\"-i\", \"--image\", action=\"store\", help=\"Docker image", "args = parser.parse_args() if args.verbose: manage = Manage(args.image, args.verbose) else: manage = Manage(args.image)", "+ \"--username {} \".format(self.dockerhub_priv_username) + \"--password {}\".format(self.dockerhub_priv_password)) subprocess.call(cmd, shell=True) def build(self): sys.stdout.write(\"Building Container...", "import configparser import subprocess import argparse import shutil import base64 import glob import", "parser.get(\"dockerhub\", \"username\")).decode(\"utf-8\") self.dockerhub_password = <PASSWORD>4decode( parser.get(\"dockerhub\", \"password\")).decode(\"utf-8\") self.dockerhub_priv_username = base64.b64decode( parser.get(\"dockerhub_priv\", \"username\")).decode(\"utf-8\") self.dockerhub_priv_password", ".format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\")", "\"--username {} \".format(self.dockerhub_priv_username) + \"--password {}\".format(self.dockerhub_priv_password)) subprocess.call(cmd, shell=True) def build(self): sys.stdout.write(\"Building Container... \")", "= promote.Promote(self.image) prom.run() sys.stdout.write(\"Done\") sys.stdout.flush() def clean(self): files_to_remove = glob.glob(\"./*.pyc\") for aFile in", "required.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Show output (default False)\") args = parser.parse_args() if args.verbose: manage", "sys.stdout.flush() prom = promote.Promote(self.image) prom.run() sys.stdout.write(\"Done\") sys.stdout.flush() def clean(self): files_to_remove = glob.glob(\"./*.pyc\") for", "subprocess.call(cmd, shell=True) def docker_login_priv(self): cmd = (\"docker login dockerhub.paypalcorp.com:443 \" + \"--username {}", "sys.stdout.write(\"Promoting Container... \") sys.stdout.flush() prom = promote.Promote(self.image) prom.run() sys.stdout.write(\"Done\") sys.stdout.flush() def clean(self): files_to_remove", "False)\") args = parser.parse_args() if args.verbose: manage = Manage(args.image, args.verbose) else: manage =", "stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def push(self): sys.stdout.write(\"Pushing Container... 
\") sys.stdout.flush() cmd =", "sys.stdout.flush() def clean(self): files_to_remove = glob.glob(\"./*.pyc\") for aFile in files_to_remove: os.remove(aFile) shutil.rmtree(\"__pycache__\") def", "+ \"--username {} \".format(self.dockerhub_username) + \"--password {}\".format(self.dockerhub_password)) subprocess.call(cmd, shell=True) def docker_login_priv(self): cmd =", "\"--image\", action=\"store\", help=\"Docker image and tag\", required=True) required.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Show output (default", "{}\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush()", "configparser.ConfigParser() parser.read(\"authentication.ini\") self.dockerhub_username = base64.b64decode( parser.get(\"dockerhub\", \"username\")).decode(\"utf-8\") self.dockerhub_password = <PASSWORD>4decode( parser.get(\"dockerhub\", \"password\")).decode(\"utf-8\") self.dockerhub_priv_username", "\"w\") self.image = image parser = configparser.ConfigParser() parser.read(\"authentication.ini\") self.dockerhub_username = base64.b64decode( parser.get(\"dockerhub\", \"username\")).decode(\"utf-8\")", "Container... \") sys.stdout.flush() prom = promote.Promote(self.image) prom.run() sys.stdout.write(\"Done\") sys.stdout.flush() def clean(self): files_to_remove =", "cmd = (\"docker push {}\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull,", "self.dockerhub_username = base64.b64decode( parser.get(\"dockerhub\", \"username\")).decode(\"utf-8\") self.dockerhub_password = <PASSWORD>4decode( parser.get(\"dockerhub\", \"password\")).decode(\"utf-8\") self.dockerhub_priv_username = base64.b64decode(", "promote class Manage(object): def __init__(self, image, verbose=False): self.init_feedback() self.verbose = verbose self.devnull =", "= argparse.ArgumentParser() required = parser.add_argument_group('Required arguments') required.add_argument(\"-i\", \"--image\", action=\"store\", help=\"Docker image and tag\",", "\"username\")).decode(\"utf-8\") self.dockerhub_password = <PASSWORD>4decode( parser.get(\"dockerhub\", \"password\")).decode(\"utf-8\") self.dockerhub_priv_username = base64.b64decode( parser.get(\"dockerhub_priv\", \"username\")).decode(\"utf-8\") self.dockerhub_priv_password =", "def run(self): self.docker_login_pub() self.build() self.push() self.promote() self.clean() self.docker_login_priv() def main(): parser = argparse.ArgumentParser()", "stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def push(self): sys.stdout.write(\"Pushing Container... \") sys.stdout.flush() cmd", "+ \"--password {}\".format(self.dockerhub_priv_password)) subprocess.call(cmd, shell=True) def build(self): sys.stdout.write(\"Building Container... 
\") sys.stdout.flush() cmd =", "sys.stdout.flush() cmd = (\"docker push {}\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd,", "self.dockerhub_password = <PASSWORD>4decode( parser.get(\"dockerhub\", \"password\")).decode(\"utf-8\") self.dockerhub_priv_username = base64.b64decode( parser.get(\"dockerhub_priv\", \"username\")).decode(\"utf-8\") self.dockerhub_priv_password = <PASSWORD>.b64decode(", "self.init_feedback() self.verbose = verbose self.devnull = open(os.devnull, \"w\") self.image = image parser =", "#\") print(\"#########################\") def docker_login_pub(self): cmd = (\"docker login \" + \"--username {} \".format(self.dockerhub_username)", "configparser import subprocess import argparse import shutil import base64 import glob import sys", "print(\"\") def promote(self): sys.stdout.write(\"Promoting Container... \") sys.stdout.flush() prom = promote.Promote(self.image) prom.run() sys.stdout.write(\"Done\") sys.stdout.flush()", "shutil import base64 import glob import sys import os import promote class Manage(object):", "import promote class Manage(object): def __init__(self, image, verbose=False): self.init_feedback() self.verbose = verbose self.devnull", "glob.glob(\"./*.pyc\") for aFile in files_to_remove: os.remove(aFile) shutil.rmtree(\"__pycache__\") def run(self): self.docker_login_pub() self.build() self.push() self.promote()", "verbose self.devnull = open(os.devnull, \"w\") self.image = image parser = configparser.ConfigParser() parser.read(\"authentication.ini\") self.dockerhub_username", "aFile in files_to_remove: os.remove(aFile) shutil.rmtree(\"__pycache__\") def run(self): self.docker_login_pub() self.build() self.push() self.promote() self.clean() self.docker_login_priv()", "required.add_argument(\"-i\", \"--image\", action=\"store\", help=\"Docker image and tag\", required=True) required.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Show output", "help=\"Show output (default False)\") args = parser.parse_args() if args.verbose: manage = Manage(args.image, args.verbose)", "= open(os.devnull, \"w\") self.image = image parser = configparser.ConfigParser() parser.read(\"authentication.ini\") self.dockerhub_username = base64.b64decode(", "import sys import os import promote class Manage(object): def __init__(self, image, verbose=False): self.init_feedback()", "= <PASSWORD>.b64decode( parser.get(\"dockerhub_priv\", \"password\")).decode(\"utf-8\") def init_feedback(self): print(\"#########################\") print(\"# Void Build Pipeline #\") print(\"#########################\")", "open(os.devnull, \"w\") self.image = image parser = configparser.ConfigParser() parser.read(\"authentication.ini\") self.dockerhub_username = base64.b64decode( parser.get(\"dockerhub\",", "push {}\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\")", "self.verbose = verbose self.devnull = open(os.devnull, \"w\") self.image = image parser = configparser.ConfigParser()", "sys.stdout.flush() print(\"\") def push(self): sys.stdout.write(\"Pushing Container... 
\") sys.stdout.flush() cmd = (\"docker push {}\"", "<filename>build/manage.py import configparser import subprocess import argparse import shutil import base64 import glob", "cmd = (\"docker login dockerhub.paypalcorp.com:443 \" + \"--username {} \".format(self.dockerhub_priv_username) + \"--password {}\".format(self.dockerhub_priv_password))", "shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def push(self): sys.stdout.write(\"Pushing Container...", "self.promote() self.clean() self.docker_login_priv() def main(): parser = argparse.ArgumentParser() required = parser.add_argument_group('Required arguments') required.add_argument(\"-i\",", "\"--username {} \".format(self.dockerhub_username) + \"--password {}\".format(self.dockerhub_password)) subprocess.call(cmd, shell=True) def docker_login_priv(self): cmd = (\"docker", "= (\"docker build -t {} ../\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd,", "base64.b64decode( parser.get(\"dockerhub\", \"username\")).decode(\"utf-8\") self.dockerhub_password = <PASSWORD>4decode( parser.get(\"dockerhub\", \"password\")).decode(\"utf-8\") self.dockerhub_priv_username = base64.b64decode( parser.get(\"dockerhub_priv\", \"username\")).decode(\"utf-8\")", "\") sys.stdout.flush() cmd = (\"docker push {}\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else:", "sys.stdout.write(\"Pushing Container... \") sys.stdout.flush() cmd = (\"docker push {}\" .format(self.image)) if self.verbose: subprocess.call(cmd,", "= configparser.ConfigParser() parser.read(\"authentication.ini\") self.dockerhub_username = base64.b64decode( parser.get(\"dockerhub\", \"username\")).decode(\"utf-8\") self.dockerhub_password = <PASSWORD>4decode( parser.get(\"dockerhub\", \"password\")).decode(\"utf-8\")", "self.docker_login_priv() def main(): parser = argparse.ArgumentParser() required = parser.add_argument_group('Required arguments') required.add_argument(\"-i\", \"--image\", action=\"store\",", "image, verbose=False): self.init_feedback() self.verbose = verbose self.devnull = open(os.devnull, \"w\") self.image = image", "build -t {} ../\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull,", "docker_login_priv(self): cmd = (\"docker login dockerhub.paypalcorp.com:443 \" + \"--username {} \".format(self.dockerhub_priv_username) + \"--password", "prom.run() sys.stdout.write(\"Done\") sys.stdout.flush() def clean(self): files_to_remove = glob.glob(\"./*.pyc\") for aFile in files_to_remove: os.remove(aFile)", "subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def promote(self): sys.stdout.write(\"Promoting", "arguments') required.add_argument(\"-i\", \"--image\", action=\"store\", help=\"Docker image and tag\", required=True) required.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Show", "(\"docker push {}\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True)", "promote.Promote(self.image) prom.run() sys.stdout.write(\"Done\") sys.stdout.flush() def clean(self): files_to_remove = glob.glob(\"./*.pyc\") for aFile in files_to_remove:", "parser.get(\"dockerhub_priv\", 
\"username\")).decode(\"utf-8\") self.dockerhub_priv_password = <PASSWORD>.b64decode( parser.get(\"dockerhub_priv\", \"password\")).decode(\"utf-8\") def init_feedback(self): print(\"#########################\") print(\"# Void Build", "manage = Manage(args.image, args.verbose) else: manage = Manage(args.image) manage.run() if __name__ == \"__main__\":", "= base64.b64decode( parser.get(\"dockerhub_priv\", \"username\")).decode(\"utf-8\") self.dockerhub_priv_password = <PASSWORD>.b64decode( parser.get(\"dockerhub_priv\", \"password\")).decode(\"utf-8\") def init_feedback(self): print(\"#########################\") print(\"#", "build(self): sys.stdout.write(\"Building Container... \") sys.stdout.flush() cmd = (\"docker build -t {} ../\" .format(self.image))", "= (\"docker push {}\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull,", "<PASSWORD>4decode( parser.get(\"dockerhub\", \"password\")).decode(\"utf-8\") self.dockerhub_priv_username = base64.b64decode( parser.get(\"dockerhub_priv\", \"username\")).decode(\"utf-8\") self.dockerhub_priv_password = <PASSWORD>.b64decode( parser.get(\"dockerhub_priv\", \"password\")).decode(\"utf-8\")", "parser = argparse.ArgumentParser() required = parser.add_argument_group('Required arguments') required.add_argument(\"-i\", \"--image\", action=\"store\", help=\"Docker image and", "tag\", required=True) required.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Show output (default False)\") args = parser.parse_args() if", "parser.parse_args() if args.verbose: manage = Manage(args.image, args.verbose) else: manage = Manage(args.image) manage.run() if", "self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def push(self):", "else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def push(self): sys.stdout.write(\"Pushing Container... \")", "\") sys.stdout.flush() prom = promote.Promote(self.image) prom.run() sys.stdout.write(\"Done\") sys.stdout.flush() def clean(self): files_to_remove = glob.glob(\"./*.pyc\")", "shell=True) def docker_login_priv(self): cmd = (\"docker login dockerhub.paypalcorp.com:443 \" + \"--username {} \".format(self.dockerhub_priv_username)", "if self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def", "else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def promote(self): sys.stdout.write(\"Promoting Container... 
\")", "run(self): self.docker_login_pub() self.build() self.push() self.promote() self.clean() self.docker_login_priv() def main(): parser = argparse.ArgumentParser() required", "docker_login_pub(self): cmd = (\"docker login \" + \"--username {} \".format(self.dockerhub_username) + \"--password {}\".format(self.dockerhub_password))", "\".format(self.dockerhub_username) + \"--password {}\".format(self.dockerhub_password)) subprocess.call(cmd, shell=True) def docker_login_priv(self): cmd = (\"docker login dockerhub.paypalcorp.com:443", "base64 import glob import sys import os import promote class Manage(object): def __init__(self,", "= parser.add_argument_group('Required arguments') required.add_argument(\"-i\", \"--image\", action=\"store\", help=\"Docker image and tag\", required=True) required.add_argument(\"-v\", \"--verbose\",", "self.dockerhub_priv_password = <PASSWORD>.b64decode( parser.get(\"dockerhub_priv\", \"password\")).decode(\"utf-8\") def init_feedback(self): print(\"#########################\") print(\"# Void Build Pipeline #\")", "sys import os import promote class Manage(object): def __init__(self, image, verbose=False): self.init_feedback() self.verbose", "parser = configparser.ConfigParser() parser.read(\"authentication.ini\") self.dockerhub_username = base64.b64decode( parser.get(\"dockerhub\", \"username\")).decode(\"utf-8\") self.dockerhub_password = <PASSWORD>4decode( parser.get(\"dockerhub\",", "and tag\", required=True) required.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Show output (default False)\") args = parser.parse_args()", "dockerhub.paypalcorp.com:443 \" + \"--username {} \".format(self.dockerhub_priv_username) + \"--password {}\".format(self.dockerhub_priv_password)) subprocess.call(cmd, shell=True) def build(self):", "glob import sys import os import promote class Manage(object): def __init__(self, image, verbose=False):", "promote(self): sys.stdout.write(\"Promoting Container... \") sys.stdout.flush() prom = promote.Promote(self.image) prom.run() sys.stdout.write(\"Done\") sys.stdout.flush() def clean(self):", "(\"docker login \" + \"--username {} \".format(self.dockerhub_username) + \"--password {}\".format(self.dockerhub_password)) subprocess.call(cmd, shell=True) def", "os.remove(aFile) shutil.rmtree(\"__pycache__\") def run(self): self.docker_login_pub() self.build() self.push() self.promote() self.clean() self.docker_login_priv() def main(): parser", "import shutil import base64 import glob import sys import os import promote class", "sys.stdout.write(\"Building Container... \") sys.stdout.flush() cmd = (\"docker build -t {} ../\" .format(self.image)) if", "\"--password {}\".format(self.dockerhub_priv_password)) subprocess.call(cmd, shell=True) def build(self): sys.stdout.write(\"Building Container... 
\") sys.stdout.flush() cmd = (\"docker", "login dockerhub.paypalcorp.com:443 \" + \"--username {} \".format(self.dockerhub_priv_username) + \"--password {}\".format(self.dockerhub_priv_password)) subprocess.call(cmd, shell=True) def", "cmd = (\"docker build -t {} ../\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else:", "init_feedback(self): print(\"#########################\") print(\"# Void Build Pipeline #\") print(\"#########################\") def docker_login_pub(self): cmd = (\"docker", "import glob import sys import os import promote class Manage(object): def __init__(self, image,", "+ \"--password {}\".format(self.dockerhub_password)) subprocess.call(cmd, shell=True) def docker_login_priv(self): cmd = (\"docker login dockerhub.paypalcorp.com:443 \"", "in files_to_remove: os.remove(aFile) shutil.rmtree(\"__pycache__\") def run(self): self.docker_login_pub() self.build() self.push() self.promote() self.clean() self.docker_login_priv() def", "subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def promote(self): sys.stdout.write(\"Promoting Container... \") sys.stdout.flush()", "../\" .format(self.image)) if self.verbose: subprocess.call(cmd, shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush()", "prom = promote.Promote(self.image) prom.run() sys.stdout.write(\"Done\") sys.stdout.flush() def clean(self): files_to_remove = glob.glob(\"./*.pyc\") for aFile", "action=\"store\", help=\"Docker image and tag\", required=True) required.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Show output (default False)\")", "{}\".format(self.dockerhub_priv_password)) subprocess.call(cmd, shell=True) def build(self): sys.stdout.write(\"Building Container... \") sys.stdout.flush() cmd = (\"docker build", "def promote(self): sys.stdout.write(\"Promoting Container... \") sys.stdout.flush() prom = promote.Promote(self.image) prom.run() sys.stdout.write(\"Done\") sys.stdout.flush() def", "shell=True) else: subprocess.call(cmd, stdout=self.devnull, stderr=self.devnull, shell=True) sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def promote(self): sys.stdout.write(\"Promoting Container...", "sys.stdout.write(\"Done\") sys.stdout.flush() print(\"\") def push(self): sys.stdout.write(\"Pushing Container... \") sys.stdout.flush() cmd = (\"docker push", "if args.verbose: manage = Manage(args.image, args.verbose) else: manage = Manage(args.image) manage.run() if __name__", "import os import promote class Manage(object): def __init__(self, image, verbose=False): self.init_feedback() self.verbose =", "self.docker_login_pub() self.build() self.push() self.promote() self.clean() self.docker_login_priv() def main(): parser = argparse.ArgumentParser() required =" ]
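# --- Added illustration (not from the original repository): manage.py only
# assumes that a sibling `promote` module exposes `Promote(image).run()`.
# The stand-in below is a minimal sketch of that assumed interface; the
# ":stable" retag scheme is a hypothetical placeholder, not the real logic.
import subprocess


class Promote(object):
    def __init__(self, image):
        self.image = image

    def run(self):
        # Hypothetical promotion step: retag the freshly pushed image as
        # ":stable" and push the new tag.
        stable_tag = "{}:stable".format(self.image.split(":")[0])
        subprocess.call("docker tag {} {}".format(self.image, stable_tag),
                        shell=True)
        subprocess.call("docker push {}".format(stable_tag), shell=True)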
from sqlalchemy.dialects.postgresql import UUID

from .. import db
from . import Base

# Association tables for the User<->Role and Profile<->SocialMediaNetwork
# many-to-many relationships.
roles = db.Table(
    'roles',
    db.Column('role_id', UUID, db.ForeignKey('role.id')),
    db.Column('user_id', UUID, db.ForeignKey('user.id'))
)

socialmedianetworks = db.Table(
    'socialmedianetworks',
    db.Column('socialmedianetwork_id', UUID, db.ForeignKey('socialmedianetwork.id')),  # noqa
    db.Column('profile_id', UUID, db.ForeignKey('profile.id'))
)


class User(Base, db.Model):
    __tablename__ = 'user'
    openid = db.Column(db.String(200))
    # One-to-one: Profile carries the user_id foreign key. SQLAlchemy rejects
    # lazy='dynamic' combined with uselist=False, so the scalar profile link
    # uses the default loader.
    profile = db.relationship('Profile', backref='user', uselist=False)
    roles = db.relationship('Role', backref=db.backref('user', lazy='dynamic'),
                            lazy='dynamic', secondary=roles)

    def __init__(self, openid):
        self.openid = openid


class Role(Base, db.Model):
    __tablename__ = 'role'
    name = db.Column(db.String(80), unique=True)

    def __init__(self, name):
        self.name = name


class SocialMediaNetwork(Base, db.Model):
    __tablename__ = 'socialmedianetwork'
    network = db.Column(db.String(10))
    username = db.Column(db.String(255))

    def __init__(self, network, username):
        self.network = network
        self.username = username


class Profile(Base, db.Model):
    __tablename__ = 'profile'
    username = db.Column(db.String(80), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    display_name = db.Column(db.String(80), index=True, unique=True)
    first_name = db.Column(db.String(80), index=True)
    last_name = db.Column(db.String(80), index=True)
    user_id = db.Column(UUID, db.ForeignKey('user.id'))
    social_media_networks = db.relationship(
        'SocialMediaNetwork', backref=db.backref('user', lazy='dynamic'),
        lazy='dynamic', secondary=socialmedianetworks)

    def __init__(self, username, email, first_name=None, last_name=None,
                 display_name=None, user_id=None, social_media_networks=None):
        self.username = username
        self.email = email
        # Fall back to the username when no display name is supplied.
        if not display_name:
            self.display_name = username
        else:
            self.display_name = display_name
        self.first_name = first_name
        self.last_name = last_name
        # Guard: assigning None to a relationship collection raises, so only
        # set the networks when they were actually supplied.
        if social_media_networks is not None:
            self.social_media_networks = social_media_networks
        self.user_id = user_id
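# --- Added illustration (assumptions: a configured Flask-SQLAlchemy `db` and
# a `Base` mixin supplying UUID primary keys, as the models above imply; the
# openid URL and names are made up). A minimal sketch of wiring a User to a
# Profile, a Role, and a network entry:
user = User(openid='https://openid.example/jane')
profile = Profile(username='jane', email='jane@example.com',
                  first_name='Jane', last_name='Doe')  # display_name defaults to username
profile.user = user                                    # backref of User.profile
profile.social_media_networks.append(
    SocialMediaNetwork('twitter', '@jane'))            # via `socialmedianetworks`
user.roles.append(Role('admin'))                       # via `roles`
db.session.add_all([user, profile])
db.session.commit()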
#!/usr/bin/env python3
# coding=utf-8
import json
import os
import unittest

from src.utils.payloadHelper import PayloadHelper


class MappedPayloadTests(unittest.TestCase):
    def setUp(self):
        pass

    def test_hartbeat(self):
        # Load the heartbeat fixture relative to this test file.
        script_dir = os.path.dirname(__file__)
        rel_path = 'data/source/heartbeatPayload.json'
        abs_file_path = os.path.join(script_dir, rel_path)
        with open(abs_file_path) as hartbeatData:
            self.hartbeatJson = json.load(hartbeatData)
        self.helper = PayloadHelper()
        # Every entry in the fixture must map to a payload with a heartbeat.
        for item in self.hartbeatJson:
            payload = self.helper.map(item)
            self.assertIsNotNone(payload.heartbeat)

    def tearDown(self):
        self.helper = None
        self.assertIsNone(self.helper)


if __name__ == '__main__':
    unittest.main(exit=False)
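# --- Added illustration (the fixture's real schema is not in this dump; the
# shape below is an assumption satisfying only what the test checks: a JSON
# array whose items map to payloads exposing a non-None `heartbeat`). A
# minimal sketch that writes such a fixture:
import json
import os

sample_heartbeats = [
    {"type": "heartbeat", "heartbeat": {"uptime": 120, "status": "ok"}},
    {"type": "heartbeat", "heartbeat": {"uptime": 240, "status": "ok"}},
]

os.makedirs("data/source", exist_ok=True)
with open("data/source/heartbeatPayload.json", "w") as fixture:
    json.dump(sample_heartbeats, fixture, indent=2)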
[ "','.join([str(x) for x in [self.word, self.scrambled, self.guesser, self.revealed_idx]]) def advance(self, state): if self.revealed_idx", ">= len(self.word) - 1: self.scrambled += random.choice(string.ascii_lowercase) self.revealed_idx += 1 state.update_history_log(f\"{self.guesser} advanced\") #", "word_for_guesser(guesser: Player, list_of_words): for word_in_list in list_of_words: if guesser == word_in_list.guesser: return word_in_list", "from flask_socketio import SocketIO, emit # socketio = SocketIO(current_app) # logger = current_app.logger", "+= 1 state.update_history_log(f\"{self.guesser} advanced\") # socketio.emit('word advanced', {}, namespace='/word') def assign_guesser(self, players): #", "class Word: def __init__(self, word: str, player: Player, guesser: Player = None): self.word", "= players.index(self.creator) if self_idx < len( players) - 1: # Array [a, b,", "def scramble(word): l = list(word) random.shuffle(l) return ''.join(l) @staticmethod def word_for_guesser(guesser: Player, list_of_words):", "word: str, player: Player, guesser: Player = None): self.word = word.upper() self.scrambled =", "import SocketIO, emit # socketio = SocketIO(current_app) # logger = current_app.logger class Word:", "None): self.word = word.upper() self.scrambled = Word.scramble(word) # Scrambled also contains bonus letters", "Assign a random guesser instead of a fixed one if self.guesser is None:", "= guesser self.revealed_idx = 0 def __repr__(self): return \"Word\" + ','.join([str(x) for x", "one if self.guesser is None: self_idx = players.index(self.creator) if self_idx < len( players)", "0 or 1, move right otherwise overflow guesser_idx = self_idx + 1 else:", "move right otherwise overflow guesser_idx = self_idx + 1 else: guesser_idx = 0", "# from flask_socketio import SocketIO, emit # socketio = SocketIO(current_app) # logger =", "players) - 1: # Array [a, b, c] has len 3 and idx", "right otherwise overflow guesser_idx = self_idx + 1 else: guesser_idx = 0 self.guesser", "assign_guesser(self, player: Player): # self.guesser = player @staticmethod def scramble(word): l = list(word)", "contains bonus letters at endgame self.creator = player self.guesser = guesser self.revealed_idx =", "if self.revealed_idx >= len(self.word) - 1: self.scrambled += random.choice(string.ascii_lowercase) self.revealed_idx += 1 state.update_history_log(f\"{self.guesser}", "1 else: guesser_idx = 0 self.guesser = players[guesser_idx] # def assign_guesser(self, player: Player):", "b, c] has len 3 and idx 0, 1, 2. If it's 0", "# def assign_guesser(self, player: Player): # self.guesser = player @staticmethod def scramble(word): l", "c] has len 3 and idx 0, 1, 2. If it's 0 or", "__init__(self, word: str, player: Player, guesser: Player = None): self.word = word.upper() self.scrambled", "letters at endgame self.creator = player self.guesser = guesser self.revealed_idx = 0 def", "self_idx < len( players) - 1: # Array [a, b, c] has len", "string from .player import Player # from flask import current_app # from flask_socketio", "import random import string from .player import Player # from flask import current_app", "2. 
If it's 0 or 1, move right otherwise overflow guesser_idx = self_idx", "import Player # from flask import current_app # from flask_socketio import SocketIO, emit", "overflow guesser_idx = self_idx + 1 else: guesser_idx = 0 self.guesser = players[guesser_idx]", "guesser_idx = self_idx + 1 else: guesser_idx = 0 self.guesser = players[guesser_idx] #", "len(self.word) - 1: self.scrambled += random.choice(string.ascii_lowercase) self.revealed_idx += 1 state.update_history_log(f\"{self.guesser} advanced\") # socketio.emit('word", "def assign_guesser(self, players): # TODO: Assign a random guesser instead of a fixed", "= None): self.word = word.upper() self.scrambled = Word.scramble(word) # Scrambled also contains bonus", "< len( players) - 1: # Array [a, b, c] has len 3", "self.guesser = players[guesser_idx] # def assign_guesser(self, player: Player): # self.guesser = player @staticmethod", "self.guesser is None: self_idx = players.index(self.creator) if self_idx < len( players) - 1:", "= self_idx + 1 else: guesser_idx = 0 self.guesser = players[guesser_idx] # def", "list(word) random.shuffle(l) return ''.join(l) @staticmethod def word_for_guesser(guesser: Player, list_of_words): for word_in_list in list_of_words:", "return \"Word\" + ','.join([str(x) for x in [self.word, self.scrambled, self.guesser, self.revealed_idx]]) def advance(self,", "player: Player): # self.guesser = player @staticmethod def scramble(word): l = list(word) random.shuffle(l)", "self.scrambled, self.guesser, self.revealed_idx]]) def advance(self, state): if self.revealed_idx >= len(self.word) - 1: self.scrambled", "import current_app # from flask_socketio import SocketIO, emit # socketio = SocketIO(current_app) #", "or 1, move right otherwise overflow guesser_idx = self_idx + 1 else: guesser_idx", "= players[guesser_idx] # def assign_guesser(self, player: Player): # self.guesser = player @staticmethod def", "guesser_idx = 0 self.guesser = players[guesser_idx] # def assign_guesser(self, player: Player): # self.guesser", "\"Word\" + ','.join([str(x) for x in [self.word, self.scrambled, self.guesser, self.revealed_idx]]) def advance(self, state):", "of a fixed one if self.guesser is None: self_idx = players.index(self.creator) if self_idx", "random.choice(string.ascii_lowercase) self.revealed_idx += 1 state.update_history_log(f\"{self.guesser} advanced\") # socketio.emit('word advanced', {}, namespace='/word') def assign_guesser(self,", "random import string from .player import Player # from flask import current_app #", "socketio.emit('word advanced', {}, namespace='/word') def assign_guesser(self, players): # TODO: Assign a random guesser", "self_idx = players.index(self.creator) if self_idx < len( players) - 1: # Array [a,", "instead of a fixed one if self.guesser is None: self_idx = players.index(self.creator) if", "current_app.logger class Word: def __init__(self, word: str, player: Player, guesser: Player = None):", "x in [self.word, self.scrambled, self.guesser, self.revealed_idx]]) def advance(self, state): if self.revealed_idx >= len(self.word)", "Array [a, b, c] has len 3 and idx 0, 1, 2. If", "player @staticmethod def scramble(word): l = list(word) random.shuffle(l) return ''.join(l) @staticmethod def word_for_guesser(guesser:", "idx 0, 1, 2. 
If it's 0 or 1, move right otherwise overflow", "__repr__(self): return \"Word\" + ','.join([str(x) for x in [self.word, self.scrambled, self.guesser, self.revealed_idx]]) def", "guesser instead of a fixed one if self.guesser is None: self_idx = players.index(self.creator)", "flask_socketio import SocketIO, emit # socketio = SocketIO(current_app) # logger = current_app.logger class", "self.revealed_idx >= len(self.word) - 1: self.scrambled += random.choice(string.ascii_lowercase) self.revealed_idx += 1 state.update_history_log(f\"{self.guesser} advanced\")", "Player): # self.guesser = player @staticmethod def scramble(word): l = list(word) random.shuffle(l) return", "0, 1, 2. If it's 0 or 1, move right otherwise overflow guesser_idx", "Word.scramble(word) # Scrambled also contains bonus letters at endgame self.creator = player self.guesser", "# Array [a, b, c] has len 3 and idx 0, 1, 2.", "str, player: Player, guesser: Player = None): self.word = word.upper() self.scrambled = Word.scramble(word)", "random guesser instead of a fixed one if self.guesser is None: self_idx =", "endgame self.creator = player self.guesser = guesser self.revealed_idx = 0 def __repr__(self): return", "player self.guesser = guesser self.revealed_idx = 0 def __repr__(self): return \"Word\" + ','.join([str(x)", "= Word.scramble(word) # Scrambled also contains bonus letters at endgame self.creator = player", "self.guesser = player @staticmethod def scramble(word): l = list(word) random.shuffle(l) return ''.join(l) @staticmethod", "1, 2. If it's 0 or 1, move right otherwise overflow guesser_idx =", "# from flask import current_app # from flask_socketio import SocketIO, emit # socketio", "None: self_idx = players.index(self.creator) if self_idx < len( players) - 1: # Array", ".player import Player # from flask import current_app # from flask_socketio import SocketIO,", "and idx 0, 1, 2. If it's 0 or 1, move right otherwise", "emit # socketio = SocketIO(current_app) # logger = current_app.logger class Word: def __init__(self,", "in [self.word, self.scrambled, self.guesser, self.revealed_idx]]) def advance(self, state): if self.revealed_idx >= len(self.word) -", "1: self.scrambled += random.choice(string.ascii_lowercase) self.revealed_idx += 1 state.update_history_log(f\"{self.guesser} advanced\") # socketio.emit('word advanced', {},", "3 and idx 0, 1, 2. 
If it's 0 or 1, move right", "players.index(self.creator) if self_idx < len( players) - 1: # Array [a, b, c]", "players): # TODO: Assign a random guesser instead of a fixed one if", "self.word = word.upper() self.scrambled = Word.scramble(word) # Scrambled also contains bonus letters at", "= SocketIO(current_app) # logger = current_app.logger class Word: def __init__(self, word: str, player:", "def __init__(self, word: str, player: Player, guesser: Player = None): self.word = word.upper()", "advanced\") # socketio.emit('word advanced', {}, namespace='/word') def assign_guesser(self, players): # TODO: Assign a", "advanced', {}, namespace='/word') def assign_guesser(self, players): # TODO: Assign a random guesser instead", "# self.guesser = player @staticmethod def scramble(word): l = list(word) random.shuffle(l) return ''.join(l)", "also contains bonus letters at endgame self.creator = player self.guesser = guesser self.revealed_idx", "if self_idx < len( players) - 1: # Array [a, b, c] has", "return ''.join(l) @staticmethod def word_for_guesser(guesser: Player, list_of_words): for word_in_list in list_of_words: if guesser", "players[guesser_idx] # def assign_guesser(self, player: Player): # self.guesser = player @staticmethod def scramble(word):", "guesser: Player = None): self.word = word.upper() self.scrambled = Word.scramble(word) # Scrambled also", "self.revealed_idx]]) def advance(self, state): if self.revealed_idx >= len(self.word) - 1: self.scrambled += random.choice(string.ascii_lowercase)", "1: # Array [a, b, c] has len 3 and idx 0, 1,", "list_of_words): for word_in_list in list_of_words: if guesser == word_in_list.guesser: return word_in_list return None", "= 0 self.guesser = players[guesser_idx] # def assign_guesser(self, player: Player): # self.guesser =", "Player # from flask import current_app # from flask_socketio import SocketIO, emit #", "bonus letters at endgame self.creator = player self.guesser = guesser self.revealed_idx = 0", "random.shuffle(l) return ''.join(l) @staticmethod def word_for_guesser(guesser: Player, list_of_words): for word_in_list in list_of_words: if", "{}, namespace='/word') def assign_guesser(self, players): # TODO: Assign a random guesser instead of", "If it's 0 or 1, move right otherwise overflow guesser_idx = self_idx +", "Word: def __init__(self, word: str, player: Player, guesser: Player = None): self.word =", "[a, b, c] has len 3 and idx 0, 1, 2. 
import random
import string

from .player import Player

# from flask import current_app
# from flask_socketio import SocketIO, emit
# socketio = SocketIO(current_app)
# logger = current_app.logger


class Word:
    def __init__(self, word: str, player: Player, guesser: Player = None):
        self.word = word.upper()
        self.scrambled = Word.scramble(word)  # Scrambled also contains bonus letters at endgame
        self.creator = player
        self.guesser = guesser
        self.revealed_idx = 0

    def __repr__(self):
        return "Word" + ','.join([str(x) for x in [self.word, self.scrambled, self.guesser, self.revealed_idx]])

    def advance(self, state):
        # Past the last letter, each advance appends a bonus letter instead
        if self.revealed_idx >= len(self.word) - 1:
            self.scrambled += random.choice(string.ascii_lowercase)
        self.revealed_idx += 1
        state.update_history_log(f"{self.guesser} advanced")
        # socketio.emit('word advanced', {}, namespace='/word')

    def assign_guesser(self, players):
        # TODO: Assign a random guesser instead of a fixed one
        if self.guesser is None:
            self_idx = players.index(self.creator)
            if self_idx < len(players) - 1:
                # Array [a, b, c] has len 3 and idx 0, 1, 2. If it's 0 or 1,
                # move right; otherwise it would overflow, so wrap to index 0.
                guesser_idx = self_idx + 1
            else:
                guesser_idx = 0
            self.guesser = players[guesser_idx]

    # def assign_guesser(self, player: Player):
    #     self.guesser = player

    @staticmethod
    def scramble(word):
        l = list(word)
        random.shuffle(l)
        return ''.join(l)

    @staticmethod
    def word_for_guesser(guesser: Player, list_of_words):
        for word_in_list in list_of_words:
            if guesser == word_in_list.guesser:
                return word_in_list
        return None
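# --- Usage sketch (editor's addition, not part of the original module). ---
# Demonstrates the round-robin guesser assignment above. "DummyPlayer" is a
# hypothetical stand-in for .player.Player, used only to keep the sketch
# self-contained; run it in the package context so the relative import
# at the top of this module resolves.
if __name__ == "__main__":
    class DummyPlayer:
        def __init__(self, name):
            self.name = name

        def __repr__(self):
            return self.name

    players = [DummyPlayer("a"), DummyPlayer("b"), DummyPlayer("c")]

    # The creator of a word never guesses it; the next player in the list does.
    w = Word("apple", players[0])
    w.assign_guesser(players)
    assert w.guesser is players[1]

    # The last player's word wraps around to the first player.
    w = Word("pear", players[2])
    w.assign_guesser(players)
    assert w.guesser is players[0]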
from nose.tools import *

from tests.base import OsfTestCase
from tests.factories import UserFactory

from scripts.migration.migrate_mailing_lists_to_mailchimp_field import main, get_users_with_no_mailchimp_mailing_lists


class TestMigrateMailingLists(OsfTestCase):

    def setUp(self):
        super(TestMigrateMailingLists, self).setUp()
        self.user1 = UserFactory(mailing_lists={'mail': True})
        self.user2 = UserFactory(mailing_lists={'mail': False})
        self.user3 = UserFactory()
        self.user1.save()
        self.user2.save()

    def test_get_users_with_mailing_lists(self):
        users_with_mailing_list_ids = [user._id for user in get_users_with_no_mailchimp_mailing_lists()]
        assert_equal(len(users_with_mailing_list_ids), 2)
        assert_true(self.user1._id in users_with_mailing_list_ids)
        assert_true(self.user2._id in users_with_mailing_list_ids)
        assert_false(self.user3._id in users_with_mailing_list_ids)

    def test_migration_of_mailing_lists(self):
        assert_equal(self.user1.mailchimp_mailing_lists, {})
        assert_equal(self.user2.mailchimp_mailing_lists, {})
        main()
        self.user1.reload()
        self.user2.reload()
        assert_true(self.user1.mailchimp_mailing_lists.get(u'mail'))
        assert_false(self.user2.mailchimp_mailing_lists.get(u'mail'))
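# --- Editor's note (sketch, not the actual migration script). ---
# Inferred from the assertions above, not copied from the script under test:
# the migration appears to copy each user's legacy `mailing_lists` dict into
# `mailchimp_mailing_lists` for every user returned by
# get_users_with_no_mailchimp_mailing_lists(). A minimal main() consistent
# with these tests could look like:
#
#     def main():
#         for user in get_users_with_no_mailchimp_mailing_lists():
#             user.mailchimp_mailing_lists = user.mailing_lists
#             user.save()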
[ "= (1+nu)/(8*R**2)*X*Y strain['yx'] = strain['xy'] strain['zz'] = nu/(4*R**2)*(X**2+Y**2-L**2/8) #missing zero strains from sbcalc", "(uzzaux2+uzzaux3*np.cos(2*phi+beta))*r_squared) #In sbcalc, there's incorrectly sin instead of cos for k in stress:", "+ 3*S[5,5]*a**4/20, (S[0,1]/3 + S[5,5]/12)*a**2*b**2, -3*S[1,5]*a**4/20, - S[0,5]*a**2*b**2/12, -2*S[1,5]]) #dL/C31 A.append([S[5,5]*b**2, -S[1,5]*b**2, -S[0,5]*b**2,", "as in the now deprecated sbcalc x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) r_squared = X**2+Y**2 phi =", "np.nan for k in strain: strain[k][np.abs(X) > a/2] = np.nan strain[k][np.abs(Y) > b/2]", "S[5,5] = 2*(1+nu) S = S/E stress_iso, strain_iso, P_iso = isotropic_circular(Rx, Ry, L,", "= isotropic_circular(Rx, Ry, L, thickness, nu, E) meps = np.finfo(np.float).eps #machine epsilon for", "strain_imp, P_imp = anisotropic_rectangular(Rx, Ry, a, b, thickness, S) #add int indexing int2char_ind", "in range(3,4): for j in range(3,4): strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE REFERENCE TO", "strain['xx'] = ((1-nu)*L**2/4-(1-3*nu)*X**2-(3-nu)*Y**2)/(16*R**2) strain['yy'] = ((1-nu)*L**2/4-(1-3*nu)*Y**2-(3-nu)*X**2)/(16*R**2) strain['xy'] = (1+nu)/(8*R**2)*X*Y strain['yx'] = strain['xy'] strain['zz']", "-3*S[0,5]*b**4/20, S[0,1]*a**2*b**2/12, 3*S[0,0]*b**4/20, S[0,0]]) #dL/C04 A.append([0, 0, 0, 2*S[0,1] + S[5,5], -2*S[1,5], -2*S[0,5],", "strains as in the now deprecated sbcalc x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) r_squared = X**2+Y**2 phi", "= np.array([0,0,0,0,0,0,0,0,-0.5]) #Construction of A matrix rows #[C11, C20, C02, C22, C31, C13,", "stress_imp, strain_imp, P_imp = anisotropic_rectangular(Rx, Ry, a, b, thickness, S) #add int indexing", "np.finfo(np.float).eps #machine epsilon #Check stresses for i in range(1,3): for j in range(1,3):", "the now deprecated sbcalc x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) r_squared = X**2+Y**2 phi = np.arctan2(Y,X) stress", "D*(L**2/4-3*X**2-Y**2) stress['xy'] = 2*D*X*Y stress['yx'] = stress['xy'] #shorthand notation uzzaux1 = (S[2,0]+S[2,1])*L**2/4 uzzaux2", "stress = {} strain = {} D = 1/(2*R**2*(3*(S[0,0]+S[1,1])+2*S[0,1]+S[5,5])) stress['xx'] = D*(L**2/4-X**2-3*Y**2) stress['yy']", "np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_isotropic_rectangular(): #Calculate the reference stresses and strains as implemented in", "contact force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y)))))", "= 0.22 thickness = 0.1 Rx = 1000.0 Ry = 500.0 L =", "< meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i in range(1,4): for j in range(1,4): num_ind", "np.isnan(strain_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain[str_ind] - strain_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[str_ind]), np.isnan(strain_imp[str_ind](X,Y))))) #check the contact force", "sbcalc strain['xz'] = X*0 strain['zx'] = X*0 strain['yz'] = X*0 strain['zy'] = X*0", "= -nu S[1,2] = -nu S[1,0] = -nu S[2,0] = -nu S[2,1] =", "test_anisotropic_circular_vs_isotropic_circular(): E = 165 nu = 0.22 thickness = 0.1 Rx = 1000.0", "A.append([-S[0,5]*b**2, S[0,1]*b**2, S[0,0]*b**2, S[0,0]*a**2*b**2/12 + 3*S[0,1]*b**4/20, -S[0,5]*a**2*b**2/12, -3*S[0,5]*b**4/20, S[0,1]*a**2*b**2/12, 3*S[0,0]*b**4/20, S[0,0]]) #dL/C04 A.append([0,", "C13, C40, C04, lambda] A = [] A.append([12*S[5,5], -12*S[1,5], -12*S[0,5], -(S[0,5]*a**2 + S[1,5]*b**2),", 
"P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_isotropic_rectangular(): #Calculate the reference stresses and strains", "S) x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps #machine epsilon int2char_ind = ['','x','y','z'] meps =", "stress: stress[k][X**2+Y**2 > L**2/4] = np.nan for k in strain: strain[k][X**2+Y**2 > L**2/4]", "np.isnan(stress_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i in range(1,4):", "L = 100.0 S = np.zeros((6,6)) #The elastic matrix for isotropic crystal S[0,0]", "stress_aniso, strain_aniso, P_aniso = anisotropic_circular(Rx, Ry, L, thickness, S) x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) meps =", "#machine epsilon #add int indexing int2char_ind = ['','x','y','z'] for i in range(1,3): for", "matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,3], thickness=Quantity(0.1,'mm')) a = 100.0 b = 100.0 x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x)", "np.sqrt(Rx*Ry) L = 100.0 x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) stress = {} strain = {} stress['xx']", "-thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_rectangular_vs_isotropic_rectangular(): E =", "strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE REFERENCE TO THE IMPLEMENTATION stress_imp, strain_imp, P_imp =", "= 1 S[1,1] = 1 S[2,2] = 1 S[0,1] = -nu S[0,2] =", "np.finfo(np.float).eps #machine epsilon int2char_ind = ['','x','y','z'] meps = np.finfo(np.float).eps #machine epsilon #Check stresses", "np.logical_and(np.isnan(strain[num_ind]), np.isnan(strain_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain[str_ind] - strain_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[str_ind]), np.isnan(strain_imp[str_ind](X,Y))))) #check the contact", "= stress['xy'] strain['zz'] = nu/(g*R**2) * (((3+nu)/2+5*(b/a)**2+(1-nu)/2*(b/a)**4)*(X**2 - a**2/12)+\\ ((3+nu)/2+5*(a/b)**2+(1-nu)/2*(a/b)**4)*(Y**2 - b**2/12)) for", "contact forces assert np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_anisotropic_rectangular_vs_old_Version(): #For", "assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_rectangular_vs_isotropic_rectangular(): E = 165", "#Check contact forces assert np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_anisotropic_rectangular_vs_old_Version():", "strains from sbcalc strain['xz'] = X*0 strain['zx'] = X*0 strain['yz'] = X*0 strain['zy']", "+ S[5,5]/12)*a**2*b**2, S[0,0]*a**2*b**2/3 + 3*S[5,5]*b**4/20, -S[1,5]*a**2*b**2/12, -3*S[0,5]*b**4/20, -2*S[0,5]]) #dL/C13 A.append([-S[1,5]*a**2, S[1,1]*a**2, S[0,1]*a**2, S[1,1]*a**2*b**2/12", "S[1,2] = -nu S[1,0] = -nu S[2,0] = -nu S[2,1] = -nu S[3,3]", "for j in [1,3]: if i==1 and j == 1: continue num_ind =", "in range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in range(1,4): for j in range(1,4):", "def test_isotropic_rectangular(): #Calculate the reference stresses and strains as implemented in the #deprecated", "range(1,3): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert 
np.all(np.logical_or(np.abs(stress_iso[num_ind](X,Y) - stress_aniso[num_ind](X,Y)) < meps,", "int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress[num_ind] - stress_imp[num_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[num_ind]), np.isnan(stress_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y))", "= np.nan for k in strain: strain[k][np.abs(X) > a/2] = np.nan strain[k][np.abs(Y) >", "(C02 + C22*X**2 + 2*C13*X*Y + C04*Y**2)/R**2 stress['yy'] = (C20 + C22*Y**2 +", "stress_iso, strain_iso, P_iso = isotropic_rectangular(Rx, Ry, a, b, thickness, nu, E) stress_aniso, strain_aniso,", "E = 165 nu = 0.22 thickness = 0.1 Rx = 1000.0 Ry", "strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE REFERENCE TO THE IMPLEMENTATION stress_imp, strain_imp, P_imp = isotropic_rectangular(Rx, Ry,", "stress['xx'] = D*(L**2/4-X**2-3*Y**2) stress['yy'] = D*(L**2/4-3*X**2-Y**2) stress['xy'] = 2*D*X*Y stress['yx'] = stress['xy'] #shorthand", "10*((a/b)**2+(b/a)**2) + (1-nu)*((a/b)**2-(b/a)**2)**2 stress['xx'] = E/(g*R**2) * (a**2/12-X**2 + ((1+nu)/2 + 5*(a/b)**2 +", "S) #add int indexing int2char_ind = ['','x','y','z'] for i in range(1,3): for j", "Ry, L, thickness, nu, E) stress_aniso, strain_aniso, P_aniso = anisotropic_circular(Rx, Ry, L, thickness,", "= TTcrystal(crystal='Si',hkl=[9,5,1], thickness=Quantity(0.1,'mm')) S = ttx.S.in_units('GPa^-1') thickness = 0.1 Rx = 1000.0 Ry", "- strain_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[str_ind]), np.isnan(strain_imp[str_ind](X,Y))))) #check the contact force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry)", "< meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_sbcalc(): #For compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,1], thickness=Quantity(0.1,'mm'))", "C40, C04, L = np.linalg.solve(A,B) stress['xx'] = (C02 + C22*X**2 + 2*C13*X*Y +", "in stress: stress[k][np.abs(X) > a/2] = np.nan stress[k][np.abs(Y) > b/2] = np.nan for", "L**2/4] = np.nan for k in strain: strain[k][X**2+Y**2 > L**2/4] = np.nan #add", "0.1 Rx = 1000.0 Ry = 500.0 L = 100.0 S = np.zeros((6,6))", "P_imp = anisotropic_rectangular(Rx, Ry, a, b, thickness, S) #add int indexing int2char_ind =", "= 165 nu = 0.22 thickness = 0.1 Rx = 1000.0 Ry =", "range(1,3): for j in range(1,3): str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) <", "3*S[0,1]*a**4/20 + S[1,1]*a**2*b**2/12, 3*S[0,1]*b**4/20 + S[0,0]*a**2*b**2/12, 2*S[0,1] + S[5,5]]) #dL/C22 A.append([S[5,5]*a**2, -S[1,5]*a**2, -S[0,5]*a**2,", "- P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_rectangular_vs_isotropic_rectangular(): E = 165 nu =", "2*(1+nu) S[5,5] = 2*(1+nu) S = S/E stress_iso, strain_iso, P_iso = isotropic_rectangular(Rx, Ry,", "S[0,1]*b**2), -S[0,5]*a**2, -S[0,5]*b**2, S[0,1]*a**2, S[0,0]*b**2, 0]) #dL/C02 A.append([-12*S[1,5], 12*S[1,1], 12*S[1,0], (S[1,0]*a**2 + S[1,1]*b**2),", "solution through solving a linear system A*x = b B = np.array([0,0,0,0,0,0,0,0,-0.5]) #Construction", "A.append([S[5,5]*a**2, -S[1,5]*a**2, -S[0,5]*a**2, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), S[1,1]*a**2*b**2/3 + 3*S[5,5]*a**4/20, (S[0,1]/3 + S[5,5]/12)*a**2*b**2, -3*S[1,5]*a**4/20,", "P_iso = isotropic_circular(Rx, Ry, L, thickness, nu, E) stress_aniso, strain_aniso, P_aniso = anisotropic_circular(Rx,", "reference stresses and strains as implemented in the #deprecated sbcalc package E =", 
"x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps #machine epsilon int2char_ind = ['','x','y','z'] meps = np.finfo(np.float).eps", "anisotropic_circular(Rx, Ry, L, thickness, S) x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps #machine epsilon int2char_ind", "assert np.all(np.logical_or(np.abs(strain_iso[num_ind](X,Y) - strain_aniso[num_ind](X,Y)) < meps, np.logical_and(np.isnan(strain_iso[num_ind](X,Y)), np.isnan(strain_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain_iso[str_ind](X,Y) - strain_aniso[str_ind](X,Y)) <", "np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_rectangular_vs_isotropic_rectangular(): E = 165 nu", "-2*S[0,5]]) #dL/C13 A.append([-S[1,5]*a**2, S[1,1]*a**2, S[0,1]*a**2, S[1,1]*a**2*b**2/12 + 3*S[0,1]*a**4/20, -3*S[1,5]*a**4/20, -S[1,5]*a**2*b**2/12, 3*S[1,1]*a**4/20, S[0,1]*a**2*b**2/12, S[1,1]])", "S[0,1]*a**2*b**2/12, S[1,1]]) #dL/C40 A.append([-S[0,5]*b**2, S[0,1]*b**2, S[0,0]*b**2, S[0,0]*a**2*b**2/12 + 3*S[0,1]*b**4/20, -S[0,5]*a**2*b**2/12, -3*S[0,5]*b**4/20, S[0,1]*a**2*b**2/12, 3*S[0,0]*b**4/20,", "= anisotropic_rectangular(Rx, Ry, a, b, thickness, S) #add int indexing int2char_ind = ['','x','y','z']", "x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) stress = {} strain = {} stress['xx'] = -E/(16*R**2)*(X**2 + 3*Y**2", "strain_imp, P_imp = isotropic_circular(Rx, Ry, L, thickness, nu, E) meps = np.finfo(np.float).eps #machine", "stress_aniso, strain_aniso, P_aniso = anisotropic_rectangular(Rx, Ry, a, b, thickness, S) x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) meps", "S = S/E stress_iso, strain_iso, P_iso = isotropic_circular(Rx, Ry, L, thickness, nu, E)", "j in [1,3]: if i==1 and j == 1: continue strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]]", "strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE REFERENCE TO THE IMPLEMENTATION stress_imp, strain_imp, P_imp = isotropic_circular(Rx, Ry,", "thickness = 0.1 Rx = 1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry) L", "strain: strain[k][X**2+Y**2 > L**2/4] = np.nan stress_imp, strain_imp, P_imp = anisotropic_circular(Rx, Ry, L,", "np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i in [1,3]: for j", "np.nan stress[k][np.abs(Y) > b/2] = np.nan for k in strain: strain[k][np.abs(X) > a/2]", "j in range(1,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain_iso[num_ind](X,Y) - strain_aniso[num_ind](X,Y))", "range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in range(1,4): for j in range(1,4): strain[i*10+j]", "test_anisotropic_rectangular_vs_old_Version(): #For compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,3], thickness=Quantity(0.1,'mm')) a = 100.0 b =", "= np.arctan2(S[2,5],(S[2,1]-S[2,0])) strain['zz'] = D*(uzzaux1 - (uzzaux2+uzzaux3*np.cos(2*phi+beta))*r_squared) #In sbcalc, there's incorrectly sin instead", "meps = np.finfo(np.float).eps #machine epsilon #add int indexing int2char_ind = ['','x','y','z'] for i", "8 + 10*((a/b)**2+(b/a)**2) + (1-nu)*((a/b)**2-(b/a)**2)**2 stress['xx'] = E/(g*R**2) * (a**2/12-X**2 + ((1+nu)/2 +", "i==1 and j == 1: continue strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] meps = np.finfo(np.float).eps #machine", "range(1,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert 
np.all(np.logical_or(np.abs(strain_iso[num_ind](X,Y) - strain_aniso[num_ind](X,Y)) < meps,", "< meps, np.logical_and(np.isnan(stress_iso[str_ind](X,Y)), np.isnan(stress_aniso[str_ind](X,Y))))) #Check strains for i in range(1,4): for j in", "P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_isotropic_circular():", "np.sqrt(Rx*Ry) a = 100.0 b = 50.0 x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) stress = {} strain", "matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,1], thickness=Quantity(0.1,'mm')) S = ttx.S.in_units('GPa^-1') thickness = 0.1 Rx =", "500.0 R = np.sqrt(Rx*Ry) S = ttx.S.in_units('GPa^-1') stress = {} strain = {}", "mask mask = np.ones(X.shape) mask[np.abs(X)>a/2] = 0 mask[np.abs(Y)>b/2] = 0 for key in", "np.isnan(P_aniso(X,Y))))) def test_anisotropic_rectangular_vs_old_Version(): #For compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,3], thickness=Quantity(0.1,'mm')) a = 100.0", "from pyTTE import TTcrystal, Quantity def test_isotropic_circular(): #Calculate the reference stresses and strains", "-(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), (S[0,1]/3 + S[5,5]/12)*a**2*b**2, S[0,0]*a**2*b**2/3 + 3*S[5,5]*b**4/20, -S[1,5]*a**2*b**2/12, -3*S[0,5]*b**4/20, -2*S[0,5]]) #dL/C13", "-nu S[0,2] = -nu S[1,2] = -nu S[1,0] = -nu S[2,0] = -nu", "and strains as implemented in the #deprecated sbcalc package E = 165 nu", "isotropic_rectangular(Rx, Ry, a, b, thickness, nu, E) stress_aniso, strain_aniso, P_aniso = anisotropic_rectangular(Rx, Ry,", "for k in strain: strain[k][X**2+Y**2 > L**2/4] = np.nan #add int indexing int2char_ind", "= {} strain = {} g = 8 + 10*((a/b)**2+(b/a)**2) + (1-nu)*((a/b)**2-(b/a)**2)**2 stress['xx']", "S[1,1]*a**2*b**2/12, 3*S[0,1]*b**4/20 + S[0,0]*a**2*b**2/12, 2*S[0,1] + S[5,5]]) #dL/C22 A.append([S[5,5]*a**2, -S[1,5]*a**2, -S[0,5]*a**2, -(3*S[0,5]*a**4/20 +", "= int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[num_ind]), np.isnan(strain_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain[str_ind] -", "stress['yx'] = stress['xy'] strain['xx'] = ((1-nu)*L**2/4-(1-3*nu)*X**2-(3-nu)*Y**2)/(16*R**2) strain['yy'] = ((1-nu)*L**2/4-(1-3*nu)*Y**2-(3-nu)*X**2)/(16*R**2) strain['xy'] = (1+nu)/(8*R**2)*X*Y strain['yx']", "j in range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in range(1,4): for j in", "pytest. 
Created on Sat May 9 00:09:00 2020 @author: aripekka \"\"\" import sys", "thickness, S) #add int indexing int2char_ind = ['','x','y','z'] for i in range(1,3): for", "for i in range(3,4): for j in range(3,4): strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE", "assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i in [1,3]: for", "= strain[int2char_ind[i]+int2char_ind[j]] meps = np.finfo(np.float).eps #machine epsilon for i in range(1,3): for j", "int2char_ind = ['','x','y','z'] for i in range(1,3): for j in range(1,3): str_ind =", "stress[int2char_ind[i]+int2char_ind[j]] for i in range(3,4): for j in range(3,4): strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] #COMPARE", "for i in range(3,4): for j in range(3,4): num_ind = i*10+j str_ind =", "def test_anisotropic_rectangular_vs_isotropic_rectangular(): E = 165 nu = 0.22 thickness = 0.1 Rx =", "S[0,1]*a**2, S[1,1]*a**2*b**2/12 + 3*S[0,1]*a**4/20, -3*S[1,5]*a**4/20, -S[1,5]*a**2*b**2/12, 3*S[1,1]*a**4/20, S[0,1]*a**2*b**2/12, S[1,1]]) #dL/C40 A.append([-S[0,5]*b**2, S[0,1]*b**2, S[0,0]*b**2,", "r_squared = X**2+Y**2 phi = np.arctan2(Y,X) stress = {} strain = {} D", "= {} #Numerical solution through solving a linear system A*x = b B", "strain_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[str_ind]), np.isnan(strain_imp[str_ind](X,Y))))) #check the contact force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert", "R = np.sqrt(Rx*Ry) a = 100.0 b = 50.0 x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) stress =", "thickness=Quantity(0.1,'mm')) S = ttx.S.in_units('GPa^-1') thickness = 0.1 Rx = 1000.0 Ry = 500.0", "+ S[5,5]]) #dL/C22 A.append([S[5,5]*a**2, -S[1,5]*a**2, -S[0,5]*a**2, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), S[1,1]*a**2*b**2/3 + 3*S[5,5]*a**4/20, (S[0,1]/3", "i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[num_ind]), np.isnan(strain_imp[num_ind](X,Y))))) assert", "the contact force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P),", "stress['xy'] strain['zz'] = nu/(g*R**2) * (((3+nu)/2+5*(b/a)**2+(1-nu)/2*(b/a)**4)*(X**2 - a**2/12)+\\ ((3+nu)/2+5*(a/b)**2+(1-nu)/2*(a/b)**4)*(Y**2 - b**2/12)) for k", "S[0,0]*a**2 + S[0,1]*b**2, (S[0,1]+2*S[5,5])*a**2*b**2/6 + 3*(S[0,0]*a**4 + S[1,1]*b**4)/20, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), -(3*S[1,5]*b**4/20 +", "np.isnan(stress_imp[str_ind](X,Y))))) for i in [1,3]: for j in [1,3]: if i==1 and j", "a = 100.0 b = 50.0 x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) stress = {} strain =", "-E/(16*R**2)*(3*X**2 + Y**2 -L**2/4) stress['xy'] = E/(8*R**2)*X*Y stress['yx'] = stress['xy'] strain['xx'] = ((1-nu)*L**2/4-(1-3*nu)*X**2-(3-nu)*Y**2)/(16*R**2)", "- P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_sbcalc(): #For compliance matrix ttx =", "-(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), 3*S[0,1]*a**4/20 + S[1,1]*a**2*b**2/12, 3*S[0,1]*b**4/20 + S[0,0]*a**2*b**2/12, 2*S[0,1]", "#dL/C31 A.append([S[5,5]*b**2, -S[1,5]*b**2, -S[0,5]*b**2, -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), (S[0,1]/3 + S[5,5]/12)*a**2*b**2, S[0,0]*a**2*b**2/3 + 3*S[5,5]*b**4/20,", "+ C22*Y**2 + 2*C31*X*Y + C40*X**2)/R**2 stress['xy'] = -(C11 + 
2*C22*X*Y + C31*X**2", "meps = np.finfo(np.float).eps #machine epsilon for i in range(1,3): for j in range(1,3):", "on this line (corrected here) stress['xy'] = 2*E/(g*R**2)*X*Y stress['yx'] = stress['xy'] strain['zz'] =", "= 0.1 Rx = 1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry) S =", "0 mask[np.abs(Y)>b/2] = 0 for key in stress: stress[key][mask < 0.5] = np.nan", "C04, L = np.linalg.solve(A,B) stress['xx'] = (C02 + C22*X**2 + 2*C13*X*Y + C04*Y**2)/R**2", "meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i in range(3,4): for j in range(3,4): num_ind =", "anisotropic_circular(Rx, Ry, L, thickness, S) meps = np.finfo(np.float).eps #machine epsilon #add int indexing", "S/E stress_iso, strain_iso, P_iso = isotropic_rectangular(Rx, Ry, a, b, thickness, nu, E) stress_aniso,", "12*S[1,1], 12*S[1,0], (S[1,0]*a**2 + S[1,1]*b**2), -S[1,5]*a**2, -S[1,5]*b**2, S[1,1]*a**2, S[0,1]*b**2, 0]) #dL/C20 A.append([-(S[0,5]*a**2 +", "(1-nu)*((a/b)**2-(b/a)**2)**2 stress['xx'] = E/(g*R**2) * (a**2/12-X**2 + ((1+nu)/2 + 5*(a/b)**2 + (1-nu)/2*(a/b)**4)*(b**2/12-Y**2)) stress['yy']", "import * from pyTTE import TTcrystal, Quantity def test_isotropic_circular(): #Calculate the reference stresses", "np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i in range(1,4): for j", "j in [1,3]: if i==1 and j == 1: continue num_ind = i*10+j", "(b**2/12-Y**2 + ((1+nu)/2 + 5*(b/a)**2 + (1-nu)/2*(b/a)**4)*(a**2/12-X**2)) #sbcalc has a typo on this", "+ 2*C13*X*Y + C04*Y**2)/R**2 stress['yy'] = (C20 + C22*Y**2 + 2*C31*X*Y + C40*X**2)/R**2", "uzzaux1 = (S[2,0]+S[2,1])*L**2/4 uzzaux2 = 2*(S[2,0]+S[2,1]) uzzaux3 = np.sqrt((S[2,1]-S[2,0])**2+S[2,5]**2) beta = np.arctan2(S[2,5],(S[2,1]-S[2,0])) strain['zz']", "#machine epsilon int2char_ind = ['','x','y','z'] meps = np.finfo(np.float).eps #machine epsilon #Check stresses for", "a = 100.0 b = 100.0 x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) thickness = 0.1 Rx =", "nu, E) stress_aniso, strain_aniso, P_aniso = anisotropic_rectangular(Rx, Ry, a, b, thickness, S) x=np.linspace(-a/2,a/2,150)", "stress['xy'] = E/(8*R**2)*X*Y stress['yx'] = stress['xy'] strain['xx'] = ((1-nu)*L**2/4-(1-3*nu)*X**2-(3-nu)*Y**2)/(16*R**2) strain['yy'] = ((1-nu)*L**2/4-(1-3*nu)*Y**2-(3-nu)*X**2)/(16*R**2) strain['xy']", "X*0 for k in stress: stress[k][X**2+Y**2 > L**2/4] = np.nan for k in", "meps, np.logical_and(np.isnan(stress_iso[str_ind](X,Y)), np.isnan(stress_aniso[str_ind](X,Y))))) #Check strains for i in range(1,4): for j in range(1,4):", "= 0.1 Rx = 1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry) a =", "for k in stress: stress[k][np.abs(X) > a/2] = np.nan stress[k][np.abs(Y) > b/2] =", "for k in strain: strain[k][np.abs(X) > a/2] = np.nan strain[k][np.abs(Y) > b/2] =", "C22, C31, C13, C40, C04, lambda] A = [] A.append([12*S[5,5], -12*S[1,5], -12*S[0,5], -(S[0,5]*a**2", "Y**2 -L**2/4) stress['xy'] = E/(8*R**2)*X*Y stress['yx'] = stress['xy'] strain['xx'] = ((1-nu)*L**2/4-(1-3*nu)*X**2-(3-nu)*Y**2)/(16*R**2) strain['yy'] =", "0.22 thickness = 0.1 Rx = 1000.0 Ry = 500.0 L = 100.0", "assert np.all(np.logical_or(np.abs(strain['zz'] - strain_imp['zz'](X,Y)) < meps, np.logical_and(np.isnan(strain['zz']), np.isnan(strain_imp['zz'](X,Y))))) #check the contact force P", "12*S[0,1], 12*S[0,0], (S[0,0]*a**2 + S[0,1]*b**2), -S[0,5]*a**2, -S[0,5]*b**2, S[0,1]*a**2, S[0,0]*b**2, 0]) #dL/C02 A.append([-12*S[1,5], 12*S[1,1],", "sys import os.path import numpy as np sys.path.insert(1, 
os.path.join(os.path.dirname(__file__),'..')) from tbcalc.transverse_deformation import *", "THE IMPLEMENTATION stress_imp, strain_imp, P_imp = isotropic_rectangular(Rx, Ry, a, b, thickness, nu, E)", "X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps #machine epsilon int2char_ind = ['','x','y','z'] meps = np.finfo(np.float).eps #machine", "= np.array(A) C11, C20, C02, C22, C31, C13, C40, C04, L = np.linalg.solve(A,B)", "= stress['xy'].copy() strain['zz'] = S[2,0]*stress['xx'] + S[2,1]*stress['yy'] + S[2,5]*stress['xy'] strain['xz'] = 0.5*(S[3,0]*stress['xx'] +", "= X*0 for k in stress: stress[k][X**2+Y**2 > L**2/4] = np.nan for k", "np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_sbcalc(): #For compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,1], thickness=Quantity(0.1,'mm')) S =", "for i in range(1,4): for j in range(1,4): strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE", "[1,3]: if i==1 and j == 1: continue num_ind = i*10+j str_ind =", "100.0 #Calculate the stresses and strains as in the now deprecated sbcalc x=np.linspace(-L/2,L/2,150)", "now deprecated sbcalc x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) r_squared = X**2+Y**2 phi = np.arctan2(Y,X) stress =", "Created on Sat May 9 00:09:00 2020 @author: aripekka \"\"\" import sys import", "np.isnan(P_imp(X,Y))))) def test_anisotropic_rectangular_vs_isotropic_rectangular(): E = 165 nu = 0.22 thickness = 0.1 Rx", "utf-8 -*- \"\"\" Tests for the transverse deformation functions. Run with pytest. Created", "strain = {} stress['xx'] = -E/(16*R**2)*(X**2 + 3*Y**2 -L**2/4) stress['yy'] = -E/(16*R**2)*(3*X**2 +", "in range(1,3): for j in range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in range(1,4):", "i in range(1,3): for j in range(1,3): str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress[str_ind] -", "1 S[1,1] = 1 S[2,2] = 1 S[0,1] = -nu S[0,2] = -nu", "for i in range(1,3): for j in range(1,3): num_ind = i*10+j str_ind =", "epsilon int2char_ind = ['','x','y','z'] meps = np.finfo(np.float).eps #machine epsilon #Check stresses for i", "= np.finfo(np.float).eps #machine epsilon #Check stresses for i in range(1,3): for j in", "(corrected here) stress['xy'] = 2*E/(g*R**2)*X*Y stress['yx'] = stress['xy'] strain['zz'] = nu/(g*R**2) * (((3+nu)/2+5*(b/a)**2+(1-nu)/2*(b/a)**4)*(X**2", "= np.zeros((6,6)) #The elastic matrix for isotropic crystal S[0,0] = 1 S[1,1] =", "= stress['xy'] #shorthand notation uzzaux1 = (S[2,0]+S[2,1])*L**2/4 uzzaux2 = 2*(S[2,0]+S[2,1]) uzzaux3 = np.sqrt((S[2,1]-S[2,0])**2+S[2,5]**2)", "range(1,3): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress[num_ind] - stress_imp[num_ind](X,Y)) < meps,", "-S[0,5]*b**2, 0]) #dL/C11 A.append([-12*S[0,5], 12*S[0,1], 12*S[0,0], (S[0,0]*a**2 + S[0,1]*b**2), -S[0,5]*a**2, -S[0,5]*b**2, S[0,1]*a**2, S[0,0]*b**2,", "+ S[1,5]*b**2), S[0,1]*a**2 + S[1,1]*b**2, S[0,0]*a**2 + S[0,1]*b**2, (S[0,1]+2*S[5,5])*a**2*b**2/6 + 3*(S[0,0]*a**4 + S[1,1]*b**4)/20,", "Ry = 500.0 R = np.sqrt(Rx*Ry) L = 100.0 #Calculate the stresses and", "np.logical_and(np.isnan(stress_iso[num_ind](X,Y)), np.isnan(stress_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress_iso[str_ind](X,Y) - stress_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress_iso[str_ind](X,Y)), np.isnan(stress_aniso[str_ind](X,Y))))) #Check strains for", "> L**2/4] = np.nan for k in strain: strain[k][X**2+Y**2 > L**2/4] = np.nan", 
"np.all(np.logical_or(np.abs(strain_iso[str_ind](X,Y) - strain_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain_iso[str_ind](X,Y)), np.isnan(strain_aniso[str_ind](X,Y))))) #Check contact forces assert np.all(np.logical_or(np.abs(P_iso(X,Y) -", "strains for i in range(1,4): for j in range(1,4): num_ind = i*10+j str_ind", "b/2] = np.nan for k in strain: strain[k][np.abs(X) > a/2] = np.nan strain[k][np.abs(Y)", "def test_anisotropic_rectangular_vs_old_Version(): #For compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,3], thickness=Quantity(0.1,'mm')) a = 100.0 b", "C04, lambda] A = [] A.append([12*S[5,5], -12*S[1,5], -12*S[0,5], -(S[0,5]*a**2 + S[1,5]*b**2), S[5,5]*a**2, S[5,5]*b**2,", "1 S[0,1] = -nu S[0,2] = -nu S[1,2] = -nu S[1,0] = -nu", "np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i in range(3,4): for j in range(3,4): num_ind = i*10+j", "stress['yx'] = stress['xy'].copy() strain['zz'] = S[2,0]*stress['xx'] + S[2,1]*stress['yy'] + S[2,5]*stress['xy'] strain['xz'] = 0.5*(S[3,0]*stress['xx']", "for the transverse deformation functions. Run with pytest. Created on Sat May 9", "num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress_iso[num_ind](X,Y) - stress_aniso[num_ind](X,Y)) < meps, np.logical_and(np.isnan(stress_iso[num_ind](X,Y)),", "= 0.1 Rx = 1000.0 Ry = 500.0 L = 100.0 S =", "= 500.0 R = np.sqrt(Rx*Ry) a = 100.0 b = 50.0 x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x)", "{} g = 8 + 10*((a/b)**2+(b/a)**2) + (1-nu)*((a/b)**2-(b/a)**2)**2 stress['xx'] = E/(g*R**2) * (a**2/12-X**2", "np.all(np.logical_or(np.abs(strain['zz'] - strain_imp['zz'](X,Y)) < meps, np.logical_and(np.isnan(strain['zz']), np.isnan(strain_imp['zz'](X,Y))))) #check the contact force P =", "= E/(8*R**2)*X*Y stress['yx'] = stress['xy'] strain['xx'] = ((1-nu)*L**2/4-(1-3*nu)*X**2-(3-nu)*Y**2)/(16*R**2) strain['yy'] = ((1-nu)*L**2/4-(1-3*nu)*Y**2-(3-nu)*X**2)/(16*R**2) strain['xy'] =", "Ry, a, b, thickness, S) x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps #machine epsilon int2char_ind", "range(3,4): for j in range(3,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind]", "-nu S[1,2] = -nu S[1,0] = -nu S[2,0] = -nu S[2,1] = -nu", "b, thickness, nu, E) stress_aniso, strain_aniso, P_aniso = anisotropic_rectangular(Rx, Ry, a, b, thickness,", "range(1,3): for j in range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in [1,3]: for", "isotropic_circular(Rx, Ry, L, thickness, nu, E) stress_aniso, strain_aniso, P_aniso = anisotropic_circular(Rx, Ry, L,", "for i in range(1,3): for j in range(1,3): str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress[str_ind]", "2*(S[2,0]+S[2,1]) uzzaux3 = np.sqrt((S[2,1]-S[2,0])**2+S[2,5]**2) beta = np.arctan2(S[2,5],(S[2,1]-S[2,0])) strain['zz'] = D*(uzzaux1 - (uzzaux2+uzzaux3*np.cos(2*phi+beta))*r_squared) #In", "#Check strains for i in range(1,4): for j in range(1,4): num_ind = i*10+j", "isotropic_rectangular(Rx, Ry, a, b, thickness, nu, E) meps = np.finfo(np.float).eps #machine epsilon for", "+ 3*(S[0,0]*a**4 + S[1,1]*b**4)/20, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), 3*S[0,1]*a**4/20 + S[1,1]*a**2*b**2/12,", "for j in range(1,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind] -", "D*(uzzaux1 - 
(uzzaux2+uzzaux3*np.cos(2*phi+beta))*r_squared) #In sbcalc, there's incorrectly sin instead of cos for k", "np.nan #add int indexing int2char_ind = ['','x','y','z'] for i in range(1,3): for j", "= 8 + 10*((a/b)**2+(b/a)**2) + (1-nu)*((a/b)**2-(b/a)**2)**2 stress['xx'] = E/(g*R**2) * (a**2/12-X**2 + ((1+nu)/2", "TO THE IMPLEMENTATION stress_imp, strain_imp, P_imp = isotropic_circular(Rx, Ry, L, thickness, nu, E)", "Ry = 500.0 L = 100.0 S = np.zeros((6,6)) #The elastic matrix for", "np.nan strain[k][np.abs(Y) > b/2] = np.nan #add int indexing int2char_ind = ['','x','y','z'] for", "< meps, np.logical_and(np.isnan(strain[num_ind]), np.isnan(strain_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain[str_ind] - strain_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[str_ind]), np.isnan(strain_imp[str_ind](X,Y))))) #check", "< meps, np.logical_and(np.isnan(strain['zz']), np.isnan(strain_imp['zz'](X,Y))))) #check the contact force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P", "the reference stresses and strains as implemented in the #deprecated sbcalc package E", "- P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_isotropic_circular(): E = 165 nu =", "ttx.S.in_units('GPa^-1') stress = {} strain = {} #Numerical solution through solving a linear", "A*x = b B = np.array([0,0,0,0,0,0,0,0,-0.5]) #Construction of A matrix rows #[C11, C20,", "3*(S[0,0]*a**4 + S[1,1]*b**4)/20, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), 3*S[0,1]*a**4/20 + S[1,1]*a**2*b**2/12, 3*S[0,1]*b**4/20", "= stress[int2char_ind[i]+int2char_ind[j]] for i in range(1,4): for j in range(1,4): strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]]", "k in stress: stress[k][X**2+Y**2 > L**2/4] = np.nan for k in strain: strain[k][X**2+Y**2", "S[2,1] = -nu S[3,3] = 2*(1+nu) S[4,4] = 2*(1+nu) S[5,5] = 2*(1+nu) S", "stress: stress[key][mask < 0.5] = np.nan for key in strain: strain[key][mask < 0.5]", "THE IMPLEMENTATION stress_imp, strain_imp, P_imp = isotropic_circular(Rx, Ry, L, thickness, nu, E) meps", "3*S[5,5]*b**4/20, -S[1,5]*a**2*b**2/12, -3*S[0,5]*b**4/20, -2*S[0,5]]) #dL/C13 A.append([-S[1,5]*a**2, S[1,1]*a**2, S[0,1]*a**2, S[1,1]*a**2*b**2/12 + 3*S[0,1]*a**4/20, -3*S[1,5]*a**4/20, -S[1,5]*a**2*b**2/12,", "num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress[num_ind] - stress_imp[num_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[num_ind]),", "#[C11, C20, C02, C22, C31, C13, C40, C04, lambda] A = [] A.append([12*S[5,5],", "= stress[int2char_ind[i]+int2char_ind[j]] for i in [1,3]: for j in [1,3]: if i==1 and", "and j == 1: continue strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] meps = np.finfo(np.float).eps #machine epsilon", "range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in [1,3]: for j in [1,3]: if", "strain['zz'] = nu/(g*R**2) * (((3+nu)/2+5*(b/a)**2+(1-nu)/2*(b/a)**4)*(X**2 - a**2/12)+\\ ((3+nu)/2+5*(a/b)**2+(1-nu)/2*(a/b)**4)*(Y**2 - b**2/12)) for k in", "strain['xz'] = 0.5*(S[3,0]*stress['xx'] + S[3,1]*stress['yy'] + S[3,5]*stress['xy'] ) strain['zx'] = strain['xz'].copy() #Apply mask", "= 500.0 R = np.sqrt(Rx*Ry) L = 100.0 #Calculate the stresses and strains", "#Check contact forces assert np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_isotropic_rectangular():", "= 
((1-nu)*L**2/4-(1-3*nu)*X**2-(3-nu)*Y**2)/(16*R**2) strain['yy'] = ((1-nu)*L**2/4-(1-3*nu)*Y**2-(3-nu)*X**2)/(16*R**2) strain['xy'] = (1+nu)/(8*R**2)*X*Y strain['yx'] = strain['xy'] strain['zz'] =", "* (((3+nu)/2+5*(b/a)**2+(1-nu)/2*(b/a)**4)*(X**2 - a**2/12)+\\ ((3+nu)/2+5*(a/b)**2+(1-nu)/2*(a/b)**4)*(Y**2 - b**2/12)) for k in stress: stress[k][np.abs(X) >", "strain[k][X**2+Y**2 > L**2/4] = np.nan #add int indexing int2char_ind = ['','x','y','z'] for i", "x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps #machine epsilon int2char_ind = ['','x','y','z'] meps = np.finfo(np.float).eps", "= -E/(16*R**2)*(3*X**2 + Y**2 -L**2/4) stress['xy'] = E/(8*R**2)*X*Y stress['yx'] = stress['xy'] strain['xx'] =", "L = 100.0 #Calculate the stresses and strains as in the now deprecated", "force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def", "with pytest. Created on Sat May 9 00:09:00 2020 @author: aripekka \"\"\" import", "a, b, thickness, nu, E) stress_aniso, strain_aniso, P_aniso = anisotropic_rectangular(Rx, Ry, a, b,", "stress[int2char_ind[i]+int2char_ind[j]] for i in [1,3]: for j in [1,3]: if i==1 and j", "X,Y=np.meshgrid(x,x) stress = {} strain = {} g = 8 + 10*((a/b)**2+(b/a)**2) +", "in range(3,4): strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE REFERENCE TO THE IMPLEMENTATION stress_imp, strain_imp,", "+ S[0,1]*b**2), -S[0,5]*a**2, -S[0,5]*b**2, S[0,1]*a**2, S[0,0]*b**2, 0]) #dL/C02 A.append([-12*S[1,5], 12*S[1,1], 12*S[1,0], (S[1,0]*a**2 +", "S[1,1]*b**2), -S[1,5]*a**2, -S[1,5]*b**2, S[1,1]*a**2, S[0,1]*b**2, 0]) #dL/C20 A.append([-(S[0,5]*a**2 + S[1,5]*b**2), S[0,1]*a**2 + S[1,1]*b**2,", "< meps, np.logical_and(np.isnan(stress_iso[num_ind](X,Y)), np.isnan(stress_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress_iso[str_ind](X,Y) - stress_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress_iso[str_ind](X,Y)), np.isnan(stress_aniso[str_ind](X,Y))))) #Check", "np.all(np.logical_or(np.abs(strain_iso[num_ind](X,Y) - strain_aniso[num_ind](X,Y)) < meps, np.logical_and(np.isnan(strain_iso[num_ind](X,Y)), np.isnan(strain_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain_iso[str_ind](X,Y) - strain_aniso[str_ind](X,Y)) < meps,", "-2*S[1,5]]) #dL/C31 A.append([S[5,5]*b**2, -S[1,5]*b**2, -S[0,5]*b**2, -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), (S[0,1]/3 + S[5,5]/12)*a**2*b**2, S[0,0]*a**2*b**2/3 +", "2*C13*X*Y + C04*Y**2)/R**2 stress['yy'] = (C20 + C22*Y**2 + 2*C31*X*Y + C40*X**2)/R**2 stress['xy']", "- b**2/12)) for k in stress: stress[k][np.abs(X) > a/2] = np.nan stress[k][np.abs(Y) >", "os.path.join(os.path.dirname(__file__),'..')) from tbcalc.transverse_deformation import * from pyTTE import TTcrystal, Quantity def test_isotropic_circular(): #Calculate", "b = 100.0 S = np.zeros((6,6)) #The elastic matrix for isotropic crystal S[0,0]", "1: continue strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] meps = np.finfo(np.float).eps #machine epsilon for i in", "P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_rectangular_vs_isotropic_rectangular(): E = 165 nu = 0.22", "thickness, S) x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps #machine epsilon int2char_ind = ['','x','y','z'] meps", "= -nu S[2,1] = -nu S[3,3] = 2*(1+nu) S[4,4] = 2*(1+nu) S[5,5] =", "-S[1,5]*a**2, -S[0,5]*b**2, 0]) #dL/C11 A.append([-12*S[0,5], 12*S[0,1], 
12*S[0,0], (S[0,0]*a**2 + S[0,1]*b**2), -S[0,5]*a**2, -S[0,5]*b**2, S[0,1]*a**2,", "S[2,2] = 1 S[0,1] = -nu S[0,2] = -nu S[1,2] = -nu S[1,0]", "np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain['zz'] - strain_imp['zz'](X,Y)) < meps,", "np.logical_and(np.isnan(stress_iso[str_ind](X,Y)), np.isnan(stress_aniso[str_ind](X,Y))))) #Check strains for i in range(1,4): for j in range(1,4): num_ind", "< meps, np.logical_and(np.isnan(strain[str_ind]), np.isnan(strain_imp[str_ind](X,Y))))) #check the contact force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P", "> b/2] = np.nan #add int indexing int2char_ind = ['','x','y','z'] for i in", "0]) #dL/C11 A.append([-12*S[0,5], 12*S[0,1], 12*S[0,0], (S[0,0]*a**2 + S[0,1]*b**2), -S[0,5]*a**2, -S[0,5]*b**2, S[0,1]*a**2, S[0,0]*b**2, 0])", "C22*X**2 + 2*C13*X*Y + C04*Y**2)/R**2 stress['yy'] = (C20 + C22*Y**2 + 2*C31*X*Y +", "00:09:00 2020 @author: aripekka \"\"\" import sys import os.path import numpy as np", "+ S[5,5], -2*S[1,5], -2*S[0,5], S[1,1], S[0,0], 0]) A = np.array(A) C11, C20, C02,", "stress['xy'] = 2*D*X*Y stress['yx'] = stress['xy'] #shorthand notation uzzaux1 = (S[2,0]+S[2,1])*L**2/4 uzzaux2 =", "= anisotropic_rectangular(Rx, Ry, a, b, thickness, S) x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps #machine", "-2*S[1,5], -2*S[0,5], S[1,1], S[0,0], 0]) A = np.array(A) C11, C20, C02, C22, C31,", "np.all(np.logical_or(np.abs(stress_iso[str_ind](X,Y) - stress_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress_iso[str_ind](X,Y)), np.isnan(stress_aniso[str_ind](X,Y))))) #Check strains for i in range(1,4):", "= strain['xy'] strain['zz'] = nu/(4*R**2)*(X**2+Y**2-L**2/8) #missing zero strains from sbcalc strain['xz'] = X*0", "S[0,1]*a**2 + S[1,1]*b**2, S[0,0]*a**2 + S[0,1]*b**2, (S[0,1]+2*S[5,5])*a**2*b**2/6 + 3*(S[0,0]*a**4 + S[1,1]*b**4)/20, -(3*S[0,5]*a**4/20 +", "2*(1+nu) S[5,5] = 2*(1+nu) S = S/E stress_iso, strain_iso, P_iso = isotropic_circular(Rx, Ry,", "indexing int2char_ind = ['','x','y','z'] for i in range(1,3): for j in range(1,3): str_ind", "S[2,0]*stress['xx'] + S[2,1]*stress['yy'] + S[2,5]*stress['xy'] strain['xz'] = 0.5*(S[3,0]*stress['xx'] + S[3,1]*stress['yy'] + S[3,5]*stress['xy'] )", "= 2*(1+nu) S = S/E stress_iso, strain_iso, P_iso = isotropic_rectangular(Rx, Ry, a, b,", "[1,3]: for j in [1,3]: if i==1 and j == 1: continue strain[i*10+j]", "np.isnan(stress_imp[str_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain['zz'] - strain_imp['zz'](X,Y)) < meps, np.logical_and(np.isnan(strain['zz']), np.isnan(strain_imp['zz'](X,Y))))) #check the contact force", "< meps, np.logical_and(np.isnan(strain_iso[str_ind](X,Y)), np.isnan(strain_aniso[str_ind](X,Y))))) #Check contact forces assert np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps,", "- strain_imp[num_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[num_ind]), np.isnan(strain_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain[str_ind] - strain_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[str_ind]),", "2*D*X*Y stress['yx'] = stress['xy'] #shorthand notation uzzaux1 = (S[2,0]+S[2,1])*L**2/4 uzzaux2 = 2*(S[2,0]+S[2,1]) uzzaux3", "thickness, nu, E) stress_aniso, strain_aniso, P_aniso = anisotropic_circular(Rx, Ry, L, thickness, S) x=np.linspace(-L/2,L/2,150)", "stress[k][np.abs(X) > a/2] = np.nan stress[k][np.abs(Y) > b/2] = 
np.nan
    for k in
              S[0,1]*a**2, S[0,0]*b**2, 0])
    #dL/C02
    A.append([-12*S[1,5], 12*S[1,1], 12*S[1,0], (S[1,0]*a**2 + S[1,1]*b**2), -S[1,5]*a**2,
Run with pytest.", "in stress: stress[k][X**2+Y**2 > L**2/4] = np.nan for k in strain: strain[k][X**2+Y**2 >", "A = [] A.append([12*S[5,5], -12*S[1,5], -12*S[0,5], -(S[0,5]*a**2 + S[1,5]*b**2), S[5,5]*a**2, S[5,5]*b**2, -S[1,5]*a**2, -S[0,5]*b**2,", "str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain['zz']", "i in range(1,4): for j in range(1,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j]", "= {} D = 1/(2*R**2*(3*(S[0,0]+S[1,1])+2*S[0,1]+S[5,5])) stress['xx'] = D*(L**2/4-X**2-3*Y**2) stress['yy'] = D*(L**2/4-3*X**2-Y**2) stress['xy'] =", "L, thickness, nu, E) stress_aniso, strain_aniso, P_aniso = anisotropic_circular(Rx, Ry, L, thickness, S)", "int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[num_ind]), np.isnan(strain_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain[str_ind] - strain_imp[str_ind](X,Y))", "a typo on this line (corrected here) stress['xy'] = 2*E/(g*R**2)*X*Y stress['yx'] = stress['xy']", "range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in range(3,4): for j in range(3,4): strain[i*10+j]", "- a**2/12)+\\ ((3+nu)/2+5*(a/b)**2+(1-nu)/2*(a/b)**4)*(Y**2 - b**2/12)) for k in stress: stress[k][np.abs(X) > a/2] =", "strain_aniso, P_aniso = anisotropic_rectangular(Rx, Ry, a, b, thickness, S) x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) meps =", "-S[0,5]*a**2, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), S[1,1]*a**2*b**2/3 + 3*S[5,5]*a**4/20, (S[0,1]/3 + S[5,5]/12)*a**2*b**2, -3*S[1,5]*a**4/20, - S[0,5]*a**2*b**2/12,", "strain = {} g = 8 + 10*((a/b)**2+(b/a)**2) + (1-nu)*((a/b)**2-(b/a)**2)**2 stress['xx'] = E/(g*R**2)", "#Numerical solution through solving a linear system A*x = b B = np.array([0,0,0,0,0,0,0,0,-0.5])", "-3*S[1,5]*a**4/20, - S[0,5]*a**2*b**2/12, -2*S[1,5]]) #dL/C31 A.append([S[5,5]*b**2, -S[1,5]*b**2, -S[0,5]*b**2, -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), (S[0,1]/3 +", "range(1,4): for j in range(1,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain_iso[num_ind](X,Y)", "np.isnan(stress_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress_iso[str_ind](X,Y) - stress_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress_iso[str_ind](X,Y)), np.isnan(stress_aniso[str_ind](X,Y))))) #Check strains for i", "= 1/(2*R**2*(3*(S[0,0]+S[1,1])+2*S[0,1]+S[5,5])) stress['xx'] = D*(L**2/4-X**2-3*Y**2) stress['yy'] = D*(L**2/4-3*X**2-Y**2) stress['xy'] = 2*D*X*Y stress['yx'] =", "S[4,4] = 2*(1+nu) S[5,5] = 2*(1+nu) S = S/E stress_iso, strain_iso, P_iso =", "= np.nan for k in strain: strain[k][X**2+Y**2 > L**2/4] = np.nan stress_imp, strain_imp,", "int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain_iso[num_ind](X,Y) - strain_aniso[num_ind](X,Y)) < meps, np.logical_and(np.isnan(strain_iso[num_ind](X,Y)), np.isnan(strain_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain_iso[str_ind](X,Y) - strain_aniso[str_ind](X,Y))", "R = np.sqrt(Rx*Ry) L = 100.0 x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) stress = {} strain =", "strain['zz'] = nu/(4*R**2)*(X**2+Y**2-L**2/8) #missing zero strains from sbcalc strain['xz'] = X*0 strain['zx'] =", "= ((1-nu)*L**2/4-(1-3*nu)*Y**2-(3-nu)*X**2)/(16*R**2) strain['xy'] = 
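
For example (the directory name is an assumption about the repository layout):

    pytest tests/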
(1+nu)/(8*R**2)*X*Y
    strain['yx'] = strain['xy']
    strain['zz'] = nu/(4*R**2)*(X**2+Y**2-L**2/8)
    #missing zero
2*(S[2,0]+S[2,1])
    uzzaux3 = np.sqrt((S[2,1]-S[2,0])**2+S[2,5]**2)
    beta = np.arctan2(S[2,5],(S[2,1]-S[2,0]))
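    #Note: uzzaux1-uzzaux3 and beta rewrite the out-of-plane strain in polar form,
    #strain['zz'] = D*(uzzaux1 - (uzzaux2 + uzzaux3*cos(2*phi + beta))*r**2).
    #For an isotropic compliance matrix S[2,0] == S[2,1] and S[2,5] == 0, so
    #uzzaux3 == 0 and the phi-dependent term vanishes, consistent with the
    #isotropic-vs-anisotropic circular comparison test in this file.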
stresses
    for i in range(1,3):
        for j in range(1,3):
            num_ind
P_imp(X,Y)) < meps,", "= ['','x','y','z'] for i in range(1,3): for j in range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]]", "thickness=Quantity(0.1,'mm')) a = 100.0 b = 100.0 x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) thickness = 0.1 Rx", "5*S[1,5]*a**2*b**2/12), S[1,1]*a**2*b**2/3 + 3*S[5,5]*a**4/20, (S[0,1]/3 + S[5,5]/12)*a**2*b**2, -3*S[1,5]*a**4/20, - S[0,5]*a**2*b**2/12, -2*S[1,5]]) #dL/C31 A.append([S[5,5]*b**2,", "E) stress_aniso, strain_aniso, P_aniso = anisotropic_rectangular(Rx, Ry, a, b, thickness, S) x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x)", "IMPLEMENTATION stress_imp, strain_imp, P_imp = isotropic_circular(Rx, Ry, L, thickness, nu, E) meps =", "3*S[0,1]*b**4/20, -S[0,5]*a**2*b**2/12, -3*S[0,5]*b**4/20, S[0,1]*a**2*b**2/12, 3*S[0,0]*b**4/20, S[0,0]]) #dL/C04 A.append([0, 0, 0, 2*S[0,1] + S[5,5],", "X,Y=np.meshgrid(x,x) r_squared = X**2+Y**2 phi = np.arctan2(Y,X) stress = {} strain = {}", "int2char_ind = ['','x','y','z'] meps = np.finfo(np.float).eps #machine epsilon #Check stresses for i in", "- P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_anisotropic_rectangular_vs_old_Version(): #For compliance matrix ttx =", "= np.nan #add int indexing int2char_ind = ['','x','y','z'] for i in range(1,3): for", "np.finfo(np.float).eps #machine epsilon #add int indexing int2char_ind = ['','x','y','z'] for i in range(1,3):", "= i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[num_ind]), np.isnan(strain_imp[num_ind](X,Y)))))", "np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_isotropic_circular(): E = 165 nu", "= 500.0 R = np.sqrt(Rx*Ry) S = ttx.S.in_units('GPa^-1') stress = {} strain =", "= np.nan for k in strain: strain[k][X**2+Y**2 > L**2/4] = np.nan #add int", "S[1,1]*b**2, S[0,0]*a**2 + S[0,1]*b**2, (S[0,1]+2*S[5,5])*a**2*b**2/6 + 3*(S[0,0]*a**4 + S[1,1]*b**4)/20, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), -(3*S[1,5]*b**4/20", "in strain: strain[k][X**2+Y**2 > L**2/4] = np.nan stress_imp, strain_imp, P_imp = anisotropic_circular(Rx, Ry,", "np.all(np.logical_or(np.abs(stress[num_ind] - stress_imp[num_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[num_ind]), np.isnan(stress_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps,", "= 2*E/(g*R**2)*X*Y stress['yx'] = stress['xy'] strain['zz'] = nu/(g*R**2) * (((3+nu)/2+5*(b/a)**2+(1-nu)/2*(b/a)**4)*(X**2 - a**2/12)+\\ ((3+nu)/2+5*(a/b)**2+(1-nu)/2*(a/b)**4)*(Y**2", "L**2/4] = np.nan for k in strain: strain[k][X**2+Y**2 > L**2/4] = np.nan stress_imp,", "compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,1], thickness=Quantity(0.1,'mm')) S = ttx.S.in_units('GPa^-1') thickness = 0.1 Rx", "for j in range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in [1,3]: for j", "= 100.0 #Calculate the stresses and strains as in the now deprecated sbcalc", "- stress_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress_iso[str_ind](X,Y)), np.isnan(stress_aniso[str_ind](X,Y))))) #Check strains for i in range(1,4): for", "in range(1,3): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress_iso[num_ind](X,Y) - stress_aniso[num_ind](X,Y)) <", "P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < 
meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y)))))

def test_anisotropic_rectangular_vs_isotropic_rectangular():
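    """Check that the general anisotropic rectangular solution reduces to the
    closed-form isotropic one when given an isotropic compliance matrix."""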
np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y)))))
    for i in range(3,4):
        for
nu/(4*R**2)*(X**2+Y**2-L**2/8)", "np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_sbcalc(): #For compliance matrix ttx", "= X*0 strain['zx'] = X*0 strain['yz'] = X*0 strain['zy'] = X*0 for k", "+ C04*Y**2)/R**2 stress['yy'] = (C20 + C22*Y**2 + 2*C31*X*Y + C40*X**2)/R**2 stress['xy'] =", "X,Y=np.meshgrid(x,x) stress = {} strain = {} stress['xx'] = -E/(16*R**2)*(X**2 + 3*Y**2 -L**2/4)", "+ (1-nu)/2*(b/a)**4)*(a**2/12-X**2)) #sbcalc has a typo on this line (corrected here) stress['xy'] =", "= np.sqrt(Rx*Ry) L = 100.0 #Calculate the stresses and strains as in the", "B = np.array([0,0,0,0,0,0,0,0,-0.5]) #Construction of A matrix rows #[C11, C20, C02, C22, C31,", "assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_sbcalc(): #For compliance matrix", "{} D = 1/(2*R**2*(3*(S[0,0]+S[1,1])+2*S[0,1]+S[5,5])) stress['xx'] = D*(L**2/4-X**2-3*Y**2) stress['yy'] = D*(L**2/4-3*X**2-Y**2) stress['xy'] = 2*D*X*Y", "+ S[3,1]*stress['yy'] + S[3,5]*stress['xy'] ) strain['zx'] = strain['xz'].copy() #Apply mask mask = np.ones(X.shape)", "strain_imp['zz'](X,Y)) < meps, np.logical_and(np.isnan(strain['zz']), np.isnan(strain_imp['zz'](X,Y))))) #check the contact force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert", "['','x','y','z'] for i in range(1,3): for j in range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for", "in range(3,4): for j in range(3,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert", "strain['zz'] = D*(uzzaux1 - (uzzaux2+uzzaux3*np.cos(2*phi+beta))*r_squared) #In sbcalc, there's incorrectly sin instead of cos", "assert np.all(np.logical_or(np.abs(strain_iso[str_ind](X,Y) - strain_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain_iso[str_ind](X,Y)), np.isnan(strain_aniso[str_ind](X,Y))))) #Check contact forces assert np.all(np.logical_or(np.abs(P_iso(X,Y)", "= (C02 + C22*X**2 + 2*C13*X*Y + C04*Y**2)/R**2 stress['yy'] = (C20 + C22*Y**2", "a, b, thickness, S) #add int indexing int2char_ind = ['','x','y','z'] for i in", "np.sqrt(Rx*Ry) L = 100.0 #Calculate the stresses and strains as in the now", "D*(L**2/4-X**2-3*Y**2) stress['yy'] = D*(L**2/4-3*X**2-Y**2) stress['xy'] = 2*D*X*Y stress['yx'] = stress['xy'] #shorthand notation uzzaux1", "-thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_isotropic_circular(): E =", "strain['xy'] strain['zz'] = nu/(4*R**2)*(X**2+Y**2-L**2/8) #missing zero strains from sbcalc strain['xz'] = X*0 strain['zx']", "#shorthand notation uzzaux1 = (S[2,0]+S[2,1])*L**2/4 uzzaux2 = 2*(S[2,0]+S[2,1]) uzzaux3 = np.sqrt((S[2,1]-S[2,0])**2+S[2,5]**2) beta =", "= anisotropic_circular(Rx, Ry, L, thickness, S) meps = np.finfo(np.float).eps #machine epsilon #add int", "500.0 R = np.sqrt(Rx*Ry) a = 100.0 b = 50.0 x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) stress", "thickness, S) x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps #machine epsilon int2char_ind = ['','x','y','z'] meps", "((1+nu)/2 + 5*(b/a)**2 + (1-nu)/2*(b/a)**4)*(a**2/12-X**2)) #sbcalc has a typo on this line (corrected", "= nu/(4*R**2)*(X**2+Y**2-L**2/8) #missing zero strains from sbcalc strain['xz'] = X*0 strain['zx'] = X*0", "(a**2/12-X**2 + ((1+nu)/2 + 5*(a/b)**2 + (1-nu)/2*(a/b)**4)*(b**2/12-Y**2)) stress['yy'] = 
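    #Note: points outside the wafer are set to NaN below, so these tests cannot
    #use np.allclose. A minimal sketch of the NaN-aware comparison pattern that
    #the assertions repeat (hypothetical helper, not part of the original tests):
    #
    #    def nan_aware_allclose(u, v, tol=np.finfo(float).eps):
    #        """True where |u - v| < tol, or where both entries are NaN."""
    #        return np.all(np.logical_or(np.abs(u - v) < tol,
    #                                    np.logical_and(np.isnan(u), np.isnan(v))))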
E/(g*R**2) * (b**2/12-Y**2 +", "50.0 x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) stress = {} strain = {} g = 8 +", "assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[num_ind]), np.isnan(strain_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain[str_ind] - strain_imp[str_ind](X,Y)) <", "S[1,1]*a**2, S[0,1]*b**2, 0]) #dL/C20 A.append([-(S[0,5]*a**2 + S[1,5]*b**2), S[0,1]*a**2 + S[1,1]*b**2, S[0,0]*a**2 + S[0,1]*b**2,", "nu/(g*R**2) * (((3+nu)/2+5*(b/a)**2+(1-nu)/2*(b/a)**4)*(X**2 - a**2/12)+\\ ((3+nu)/2+5*(a/b)**2+(1-nu)/2*(a/b)**4)*(Y**2 - b**2/12)) for k in stress: stress[k][np.abs(X)", "test_isotropic_circular(): #Calculate the reference stresses and strains as implemented in the #deprecated sbcalc", "stress: stress[k][np.abs(X) > a/2] = np.nan stress[k][np.abs(Y) > b/2] = np.nan for k", "= np.nan for key in strain: strain[key][mask < 0.5] = np.nan stress_imp, strain_imp,", "#missing zero strains from sbcalc strain['xz'] = X*0 strain['zx'] = X*0 strain['yz'] =", "strain_aniso, P_aniso = anisotropic_circular(Rx, Ry, L, thickness, S) x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps", "+ 5*(b/a)**2 + (1-nu)/2*(b/a)**4)*(a**2/12-X**2)) #sbcalc has a typo on this line (corrected here)", "nu/(4*R**2)*(X**2+Y**2-L**2/8) #missing zero strains from sbcalc strain['xz'] = X*0 strain['zx'] = X*0 strain['yz']", "Rx = 1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry) L = 100.0 x=np.linspace(-L/2,L/2,150)", "+ 10*((a/b)**2+(b/a)**2) + (1-nu)*((a/b)**2-(b/a)**2)**2 stress['xx'] = E/(g*R**2) * (a**2/12-X**2 + ((1+nu)/2 + 5*(a/b)**2", "np.isnan(strain_aniso[str_ind](X,Y))))) #Check contact forces assert np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def", "S[1,1]*b**4)/20, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), 3*S[0,1]*a**4/20 + S[1,1]*a**2*b**2/12, 3*S[0,1]*b**4/20 + S[0,0]*a**2*b**2/12,", "sbcalc x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) r_squared = X**2+Y**2 phi = np.arctan2(Y,X) stress = {} strain", "assert np.all(np.logical_or(np.abs(strain[str_ind] - strain_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[str_ind]), np.isnan(strain_imp[str_ind](X,Y))))) #check the contact force P", "the stresses and strains as in the now deprecated sbcalc x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) r_squared", "1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry) a = 100.0 b = 50.0", "str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress[num_ind] - stress_imp[num_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[num_ind]), np.isnan(stress_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress[str_ind]", "meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain['zz'] - strain_imp['zz'](X,Y)) < meps, np.logical_and(np.isnan(strain['zz']), np.isnan(strain_imp['zz'](X,Y))))) #check the", "-*- coding: utf-8 -*- \"\"\" Tests for the transverse deformation functions. 
    A.append([-S[0,5]*b**2, S[0,1]*b**2, S[0,0]*b**2, S[0,0]*a**2*b**2/12
    S[5,5] = 2*(1+nu)
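    #Note: together with the S = S/E normalization that follows, this fills the
    #standard isotropic compliance matrix in Voigt notation. A loop-free
    #equivalent (sketch only, not the original construction):
    #
    #    S = np.diag([1.0, 1.0, 1.0, 2*(1+nu), 2*(1+nu), 2*(1+nu)])
    #    S[:3, :3] -= nu*(1 - np.eye(3))
    #    S /= E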
in range(1,4):
            num_ind =
S[1,1]*b**4)/20, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12),", "np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i in range(1,4): for j in range(1,4): num_ind = i*10+j", "ttx.S.in_units('GPa^-1') thickness = 0.1 Rx = 1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry)", "= E/(g*R**2) * (b**2/12-Y**2 + ((1+nu)/2 + 5*(b/a)**2 + (1-nu)/2*(b/a)**4)*(a**2/12-X**2)) #sbcalc has a", "= [] A.append([12*S[5,5], -12*S[1,5], -12*S[0,5], -(S[0,5]*a**2 + S[1,5]*b**2), S[5,5]*a**2, S[5,5]*b**2, -S[1,5]*a**2, -S[0,5]*b**2, 0])", "meps, np.logical_and(np.isnan(strain_iso[num_ind](X,Y)), np.isnan(strain_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain_iso[str_ind](X,Y) - strain_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain_iso[str_ind](X,Y)), np.isnan(strain_aniso[str_ind](X,Y))))) #Check contact", "forces assert np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_anisotropic_rectangular_vs_old_Version(): #For compliance", "np.nan stress_imp, strain_imp, P_imp = anisotropic_rectangular(Rx, Ry, a, b, thickness, S) #add int", "in range(1,3): for j in range(1,3): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert", "= i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress_iso[num_ind](X,Y) - stress_aniso[num_ind](X,Y)) < meps, np.logical_and(np.isnan(stress_iso[num_ind](X,Y)), np.isnan(stress_aniso[num_ind](X,Y)))))", "implemented in the #deprecated sbcalc package E = 165 nu = 0.22 thickness", "in range(1,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) <", "assert np.all(np.logical_or(np.abs(stress_iso[num_ind](X,Y) - stress_aniso[num_ind](X,Y)) < meps, np.logical_and(np.isnan(stress_iso[num_ind](X,Y)), np.isnan(stress_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress_iso[str_ind](X,Y) - stress_aniso[str_ind](X,Y)) <", "Rx = 1000.0 Ry = 500.0 a = 100.0 b = 100.0 S", ") strain['zx'] = strain['xz'].copy() #Apply mask mask = np.ones(X.shape) mask[np.abs(X)>a/2] = 0 mask[np.abs(Y)>b/2]", "sbcalc package E = 165 nu = 0.22 thickness = 0.1 Rx =", "range(1,3): for j in range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in range(1,4): for", "+ 2*C31*X*Y + C40*X**2)/R**2 stress['xy'] = -(C11 + 2*C22*X*Y + C31*X**2 + C13*Y**2)/R**2", "import numpy as np sys.path.insert(1, os.path.join(os.path.dirname(__file__),'..')) from tbcalc.transverse_deformation import * from pyTTE import", "+ S[0,0]*a**2*b**2/12, 2*S[0,1] + S[5,5]]) #dL/C22 A.append([S[5,5]*a**2, -S[1,5]*a**2, -S[0,5]*a**2, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), S[1,1]*a**2*b**2/3", "+ 5*(a/b)**2 + (1-nu)/2*(a/b)**4)*(b**2/12-Y**2)) stress['yy'] = E/(g*R**2) * (b**2/12-Y**2 + ((1+nu)/2 + 5*(b/a)**2", "for j in range(1,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain_iso[num_ind](X,Y) -", "S[1,1]*a**2, S[0,1]*a**2, S[1,1]*a**2*b**2/12 + 3*S[0,1]*a**4/20, -3*S[1,5]*a**4/20, -S[1,5]*a**2*b**2/12, 3*S[1,1]*a**4/20, S[0,1]*a**2*b**2/12, S[1,1]]) #dL/C40 A.append([-S[0,5]*b**2, S[0,1]*b**2,", "stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i in [1,3]: for j in [1,3]:", "meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def 
test_anisotropic_rectangular_vs_old_Version(): #For compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,3], thickness=Quantity(0.1,'mm')) a", "compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,3], thickness=Quantity(0.1,'mm')) a = 100.0 b = 100.0 x=np.linspace(-a/2,a/2,150)", "Rx = 1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry) L = 100.0 #Calculate", "-nu S[2,1] = -nu S[3,3] = 2*(1+nu) S[4,4] = 2*(1+nu) S[5,5] = 2*(1+nu)", "< 0.5] = np.nan for key in strain: strain[key][mask < 0.5] = np.nan", "mask[np.abs(Y)>b/2] = 0 for key in stress: stress[key][mask < 0.5] = np.nan for", "np.logical_and(np.isnan(strain[str_ind]), np.isnan(strain_imp[str_ind](X,Y))))) #check the contact force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P - P_imp(X,Y))", "np.arctan2(S[2,5],(S[2,1]-S[2,0])) strain['zz'] = D*(uzzaux1 - (uzzaux2+uzzaux3*np.cos(2*phi+beta))*r_squared) #In sbcalc, there's incorrectly sin instead of", "for j in range(1,3): str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps,", "< meps, np.logical_and(np.isnan(strain_iso[num_ind](X,Y)), np.isnan(strain_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain_iso[str_ind](X,Y) - strain_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain_iso[str_ind](X,Y)), np.isnan(strain_aniso[str_ind](X,Y))))) #Check", "S = ttx.S.in_units('GPa^-1') stress = {} strain = {} #Numerical solution through solving", "b/2] = np.nan #add int indexing int2char_ind = ['','x','y','z'] for i in range(1,3):", "strain = {} #Numerical solution through solving a linear system A*x = b", "P_imp = anisotropic_circular(Rx, Ry, L, thickness, S) meps = np.finfo(np.float).eps #machine epsilon #add", "+ ((1+nu)/2 + 5*(b/a)**2 + (1-nu)/2*(b/a)**4)*(a**2/12-X**2)) #sbcalc has a typo on this line", "= -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_sbcalc(): #For", "stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain['zz'] - strain_imp['zz'](X,Y)) < meps, np.logical_and(np.isnan(strain['zz']), np.isnan(strain_imp['zz'](X,Y)))))", "i in range(1,3): for j in range(1,3): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j]", "+ Y**2 -L**2/4) stress['xy'] = E/(8*R**2)*X*Y stress['yx'] = stress['xy'] strain['xx'] = ((1-nu)*L**2/4-(1-3*nu)*X**2-(3-nu)*Y**2)/(16*R**2) strain['yy']", "np.isnan(stress_imp[str_ind](X,Y))))) for i in range(3,4): for j in range(3,4): num_ind = i*10+j str_ind", "b, thickness, S) x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps #machine epsilon int2char_ind = ['','x','y','z']", "forces assert np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_isotropic_rectangular(): #Calculate the", "(S[0,0]*a**2 + S[0,1]*b**2), -S[0,5]*a**2, -S[0,5]*b**2, S[0,1]*a**2, S[0,0]*b**2, 0]) #dL/C02 A.append([-12*S[1,5], 12*S[1,1], 12*S[1,0], (S[1,0]*a**2", "= ['','x','y','z'] for i in range(1,3): for j in range(1,3): str_ind = int2char_ind[i]+int2char_ind[j]", "-(S[0,5]*a**2 + S[1,5]*b**2), S[5,5]*a**2, S[5,5]*b**2, -S[1,5]*a**2, -S[0,5]*b**2, 0]) #dL/C11 A.append([-12*S[0,5], 12*S[0,1], 12*S[0,0], (S[0,0]*a**2", "for k in strain: strain[k][X**2+Y**2 > L**2/4] = np.nan stress_imp, strain_imp, P_imp 
=", "package E = 165 nu = 0.22 thickness = 0.1 Rx = 1000.0", "A.append([-12*S[1,5], 12*S[1,1], 12*S[1,0], (S[1,0]*a**2 + S[1,1]*b**2), -S[1,5]*a**2, -S[1,5]*b**2, S[1,1]*a**2, S[0,1]*b**2, 0]) #dL/C20 A.append([-(S[0,5]*a**2", "str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[num_ind]), np.isnan(strain_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain[str_ind]", "S[0,1]*b**2, (S[0,1]+2*S[5,5])*a**2*b**2/6 + 3*(S[0,0]*a**4 + S[1,1]*b**4)/20, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), 3*S[0,1]*a**4/20", "C40*X**2)/R**2 stress['xy'] = -(C11 + 2*C22*X*Y + C31*X**2 + C13*Y**2)/R**2 stress['yx'] = stress['xy'].copy()", "isotropic crystal S[0,0] = 1 S[1,1] = 1 S[2,2] = 1 S[0,1] =", "X*0 strain['zy'] = X*0 for k in stress: stress[k][X**2+Y**2 > L**2/4] = np.nan", "S[3,3] = 2*(1+nu) S[4,4] = 2*(1+nu) S[5,5] = 2*(1+nu) S = S/E stress_iso,", "= -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_rectangular_vs_isotropic_rectangular(): E", "+ 5*S[1,5]*a**2*b**2/12), -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), 3*S[0,1]*a**4/20 + S[1,1]*a**2*b**2/12, 3*S[0,1]*b**4/20 + S[0,0]*a**2*b**2/12, 2*S[0,1] +", "a, b, thickness, nu, E) meps = np.finfo(np.float).eps #machine epsilon for i in", "C40, C04, lambda] A = [] A.append([12*S[5,5], -12*S[1,5], -12*S[0,5], -(S[0,5]*a**2 + S[1,5]*b**2), S[5,5]*a**2,", "L, thickness, S) x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps #machine epsilon int2char_ind = ['','x','y','z']", "((3+nu)/2+5*(a/b)**2+(1-nu)/2*(a/b)**4)*(Y**2 - b**2/12)) for k in stress: stress[k][np.abs(X) > a/2] = np.nan stress[k][np.abs(Y)", "ttx = TTcrystal(crystal='Si',hkl=[9,5,1], thickness=Quantity(0.1,'mm')) S = ttx.S.in_units('GPa^-1') thickness = 0.1 Rx = 1000.0", "P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_sbcalc(): #For compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,1],", "-S[1,5]*b**2, S[1,1]*a**2, S[0,1]*b**2, 0]) #dL/C20 A.append([-(S[0,5]*a**2 + S[1,5]*b**2), S[0,1]*a**2 + S[1,1]*b**2, S[0,0]*a**2 +", "thickness = 0.1 Rx = 1000.0 Ry = 500.0 L = 100.0 S", "if i==1 and j == 1: continue num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j]", "= np.linalg.solve(A,B) stress['xx'] = (C02 + C22*X**2 + 2*C13*X*Y + C04*Y**2)/R**2 stress['yy'] =", "[1,3]: if i==1 and j == 1: continue strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] meps =", "meps, np.logical_and(np.isnan(stress[num_ind]), np.isnan(stress_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i", "#COMPARE THE REFERENCE TO THE IMPLEMENTATION stress_imp, strain_imp, P_imp = isotropic_circular(Rx, Ry, L,", "L, thickness, S) meps = np.finfo(np.float).eps #machine epsilon #add int indexing int2char_ind =", "+ S[1,1]*b**2, S[0,0]*a**2 + S[0,1]*b**2, (S[0,1]+2*S[5,5])*a**2*b**2/6 + 3*(S[0,0]*a**4 + S[1,1]*b**4)/20, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12),", "0.22 thickness = 0.1 Rx = 1000.0 Ry = 500.0 a = 100.0", "@author: aripekka \"\"\" import sys import os.path import numpy as np sys.path.insert(1, os.path.join(os.path.dirname(__file__),'..'))", "in range(1,4): for j in range(1,4): 
strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE REFERENCE TO", "= stress['xy'] strain['xx'] = ((1-nu)*L**2/4-(1-3*nu)*X**2-(3-nu)*Y**2)/(16*R**2) strain['yy'] = ((1-nu)*L**2/4-(1-3*nu)*Y**2-(3-nu)*X**2)/(16*R**2) strain['xy'] = (1+nu)/(8*R**2)*X*Y strain['yx'] =", "stress['xy'] #shorthand notation uzzaux1 = (S[2,0]+S[2,1])*L**2/4 uzzaux2 = 2*(S[2,0]+S[2,1]) uzzaux3 = np.sqrt((S[2,1]-S[2,0])**2+S[2,5]**2) beta", "contact forces assert np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_isotropic_rectangular(): #Calculate", "S[0,0]*a**2*b**2/12, 2*S[0,1] + S[5,5]]) #dL/C22 A.append([S[5,5]*a**2, -S[1,5]*a**2, -S[0,5]*a**2, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), S[1,1]*a**2*b**2/3 +", "C02, C22, C31, C13, C40, C04, lambda] A = [] A.append([12*S[5,5], -12*S[1,5], -12*S[0,5],", "j in range(1,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y))", "= D*(L**2/4-3*X**2-Y**2) stress['xy'] = 2*D*X*Y stress['yx'] = stress['xy'] #shorthand notation uzzaux1 = (S[2,0]+S[2,1])*L**2/4", "IMPLEMENTATION stress_imp, strain_imp, P_imp = isotropic_rectangular(Rx, Ry, a, b, thickness, nu, E) meps", "D = 1/(2*R**2*(3*(S[0,0]+S[1,1])+2*S[0,1]+S[5,5])) stress['xx'] = D*(L**2/4-X**2-3*Y**2) stress['yy'] = D*(L**2/4-3*X**2-Y**2) stress['xy'] = 2*D*X*Y stress['yx']", "nu = 0.22 thickness = 0.1 Rx = 1000.0 Ry = 500.0 L", "S[3,1]*stress['yy'] + S[3,5]*stress['xy'] ) strain['zx'] = strain['xz'].copy() #Apply mask mask = np.ones(X.shape) mask[np.abs(X)>a/2]", "-12*S[0,5], -(S[0,5]*a**2 + S[1,5]*b**2), S[5,5]*a**2, S[5,5]*b**2, -S[1,5]*a**2, -S[0,5]*b**2, 0]) #dL/C11 A.append([-12*S[0,5], 12*S[0,1], 12*S[0,0],", "assert np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_anisotropic_rectangular_vs_old_Version(): #For compliance matrix", "> a/2] = np.nan strain[k][np.abs(Y) > b/2] = np.nan #add int indexing int2char_ind", "= 0 mask[np.abs(Y)>b/2] = 0 for key in stress: stress[key][mask < 0.5] =", "incorrectly sin instead of cos for k in stress: stress[k][X**2+Y**2 > L**2/4] =", "< meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_anisotropic_rectangular_vs_old_Version(): #For compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,3], thickness=Quantity(0.1,'mm'))", "5*S[0,5]*a**2*b**2/12), 3*S[0,1]*a**4/20 + S[1,1]*a**2*b**2/12, 3*S[0,1]*b**4/20 + S[0,0]*a**2*b**2/12, 2*S[0,1] + S[5,5]]) #dL/C22 A.append([S[5,5]*a**2, -S[1,5]*a**2,", "Rx = 1000.0 Ry = 500.0 L = 100.0 S = np.zeros((6,6)) #The", "for k in stress: stress[k][X**2+Y**2 > L**2/4] = np.nan for k in strain:", "S[5,5]]) #dL/C22 A.append([S[5,5]*a**2, -S[1,5]*a**2, -S[0,5]*a**2, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), S[1,1]*a**2*b**2/3 + 3*S[5,5]*a**4/20, (S[0,1]/3 +", "def test_anisotropic_circular_vs_isotropic_circular(): E = 165 nu = 0.22 thickness = 0.1 Rx =", "(S[0,1]/3 + S[5,5]/12)*a**2*b**2, -3*S[1,5]*a**4/20, - S[0,5]*a**2*b**2/12, -2*S[1,5]]) #dL/C31 A.append([S[5,5]*b**2, -S[1,5]*b**2, -S[0,5]*b**2, -(3*S[1,5]*b**4/20 +", "S[1,0] = -nu S[2,0] = -nu S[2,1] = -nu S[3,3] = 2*(1+nu) S[4,4]", "thickness = 0.1 Rx = 1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry) a", "- (uzzaux2+uzzaux3*np.cos(2*phi+beta))*r_squared) #In sbcalc, there's incorrectly sin instead of cos for k in", "E/(8*R**2)*X*Y stress['yx'] = stress['xy'] strain['xx'] = 
((1-nu)*L**2/4-(1-3*nu)*X**2-(3-nu)*Y**2)/(16*R**2) strain['yy'] = ((1-nu)*L**2/4-(1-3*nu)*Y**2-(3-nu)*X**2)/(16*R**2) strain['xy'] = (1+nu)/(8*R**2)*X*Y", "the transverse deformation functions. Run with pytest. Created on Sat May 9 00:09:00", "j in range(1,4): strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE REFERENCE TO THE IMPLEMENTATION stress_imp,", "has a typo on this line (corrected here) stress['xy'] = 2*E/(g*R**2)*X*Y stress['yx'] =", "a/2] = np.nan stress[k][np.abs(Y) > b/2] = np.nan for k in strain: strain[k][np.abs(X)", "np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_rectangular_vs_isotropic_rectangular(): E = 165 nu = 0.22 thickness = 0.1", "stress['xy'] = -(C11 + 2*C22*X*Y + C31*X**2 + C13*Y**2)/R**2 stress['yx'] = stress['xy'].copy() strain['zz']", "matrix for isotropic crystal S[0,0] = 1 S[1,1] = 1 S[2,2] = 1", "= np.sqrt(Rx*Ry) a = 100.0 b = 50.0 x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) stress = {}", "in range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in range(3,4): for j in range(3,4):", "beta = np.arctan2(S[2,5],(S[2,1]-S[2,0])) strain['zz'] = D*(uzzaux1 - (uzzaux2+uzzaux3*np.cos(2*phi+beta))*r_squared) #In sbcalc, there's incorrectly sin", "- S[0,5]*a**2*b**2/12, -2*S[1,5]]) #dL/C31 A.append([S[5,5]*b**2, -S[1,5]*b**2, -S[0,5]*b**2, -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), (S[0,1]/3 + S[5,5]/12)*a**2*b**2,", "+ (1-nu)*((a/b)**2-(b/a)**2)**2 stress['xx'] = E/(g*R**2) * (a**2/12-X**2 + ((1+nu)/2 + 5*(a/b)**2 + (1-nu)/2*(a/b)**4)*(b**2/12-Y**2))", "= 2*(1+nu) S[4,4] = 2*(1+nu) S[5,5] = 2*(1+nu) S = S/E stress_iso, strain_iso,", "100.0 x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) thickness = 0.1 Rx = 1000.0 Ry = 500.0 R", "range(1,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) < meps,", "strain = {} D = 1/(2*R**2*(3*(S[0,0]+S[1,1])+2*S[0,1]+S[5,5])) stress['xx'] = D*(L**2/4-X**2-3*Y**2) stress['yy'] = D*(L**2/4-3*X**2-Y**2) stress['xy']", "this line (corrected here) stress['xy'] = 2*E/(g*R**2)*X*Y stress['yx'] = stress['xy'] strain['zz'] = nu/(g*R**2)", "\"\"\" import sys import os.path import numpy as np sys.path.insert(1, os.path.join(os.path.dirname(__file__),'..')) from tbcalc.transverse_deformation", "import sys import os.path import numpy as np sys.path.insert(1, os.path.join(os.path.dirname(__file__),'..')) from tbcalc.transverse_deformation import", "-(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), 3*S[0,1]*a**4/20 + S[1,1]*a**2*b**2/12, 3*S[0,1]*b**4/20 + S[0,0]*a**2*b**2/12, 2*S[0,1] + S[5,5]]) #dL/C22", "for i in [1,3]: for j in [1,3]: if i==1 and j ==", "{} strain = {} D = 1/(2*R**2*(3*(S[0,0]+S[1,1])+2*S[0,1]+S[5,5])) stress['xx'] = D*(L**2/4-X**2-3*Y**2) stress['yy'] = D*(L**2/4-3*X**2-Y**2)", "= 1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry) S = ttx.S.in_units('GPa^-1') stress =", "np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_isotropic_rectangular(): #Calculate the reference stresses", "S[5,5]/12)*a**2*b**2, -3*S[1,5]*a**4/20, - S[0,5]*a**2*b**2/12, -2*S[1,5]]) #dL/C31 A.append([S[5,5]*b**2, -S[1,5]*b**2, -S[0,5]*b**2, -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), (S[0,1]/3", "{} strain = {} stress['xx'] = -E/(16*R**2)*(X**2 + 3*Y**2 -L**2/4) stress['yy'] = -E/(16*R**2)*(3*X**2", "= np.finfo(np.float).eps #machine epsilon int2char_ind = ['','x','y','z'] meps = np.finfo(np.float).eps #machine epsilon 
#Check", "range(3,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) < meps,", "np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_isotropic_circular(): E = 165 nu = 0.22 thickness = 0.1", "strain['zy'] = X*0 for k in stress: stress[k][X**2+Y**2 > L**2/4] = np.nan for", "#Check stresses for i in range(1,3): for j in range(1,3): num_ind = i*10+j", "continue num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) < meps,", "= 100.0 S = np.zeros((6,6)) #The elastic matrix for isotropic crystal S[0,0] =", "assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i in range(1,4): for", "functions. Run with pytest. Created on Sat May 9 00:09:00 2020 @author: aripekka", "= np.nan stress_imp, strain_imp, P_imp = anisotropic_circular(Rx, Ry, L, thickness, S) meps =", "+ 2*C22*X*Y + C31*X**2 + C13*Y**2)/R**2 stress['yx'] = stress['xy'].copy() strain['zz'] = S[2,0]*stress['xx'] +", "meps, np.logical_and(np.isnan(strain[str_ind]), np.isnan(strain_imp[str_ind](X,Y))))) #check the contact force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P -", "meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_isotropic_rectangular(): #Calculate the reference stresses and strains as implemented", "as np sys.path.insert(1, os.path.join(os.path.dirname(__file__),'..')) from tbcalc.transverse_deformation import * from pyTTE import TTcrystal, Quantity", "strain['yy'] = ((1-nu)*L**2/4-(1-3*nu)*Y**2-(3-nu)*X**2)/(16*R**2) strain['xy'] = (1+nu)/(8*R**2)*X*Y strain['yx'] = strain['xy'] strain['zz'] = nu/(4*R**2)*(X**2+Y**2-L**2/8) #missing", "1: continue num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) <", "= -nu S[3,3] = 2*(1+nu) S[4,4] = 2*(1+nu) S[5,5] = 2*(1+nu) S =", "#Calculate the reference stresses and strains as implemented in the #deprecated sbcalc package", "P_imp = isotropic_circular(Rx, Ry, L, thickness, nu, E) meps = np.finfo(np.float).eps #machine epsilon", "{} strain = {} g = 8 + 10*((a/b)**2+(b/a)**2) + (1-nu)*((a/b)**2-(b/a)**2)**2 stress['xx'] =", "2020 @author: aripekka \"\"\" import sys import os.path import numpy as np sys.path.insert(1,", "2*S[0,1] + S[5,5]]) #dL/C22 A.append([S[5,5]*a**2, -S[1,5]*a**2, -S[0,5]*a**2, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), S[1,1]*a**2*b**2/3 + 3*S[5,5]*a**4/20,", "1000.0 Ry = 500.0 a = 100.0 b = 100.0 S = np.zeros((6,6))", "P_aniso = anisotropic_rectangular(Rx, Ry, a, b, thickness, S) x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps", "#dL/C22 A.append([S[5,5]*a**2, -S[1,5]*a**2, -S[0,5]*a**2, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), S[1,1]*a**2*b**2/3 + 3*S[5,5]*a**4/20, (S[0,1]/3 + S[5,5]/12)*a**2*b**2,", "in strain: strain[k][np.abs(X) > a/2] = np.nan strain[k][np.abs(Y) > b/2] = np.nan #add", "C31*X**2 + C13*Y**2)/R**2 stress['yx'] = stress['xy'].copy() strain['zz'] = S[2,0]*stress['xx'] + S[2,1]*stress['yy'] + S[2,5]*stress['xy']", "[] A.append([12*S[5,5], -12*S[1,5], -12*S[0,5], -(S[0,5]*a**2 + S[1,5]*b**2), S[5,5]*a**2, S[5,5]*b**2, -S[1,5]*a**2, -S[0,5]*b**2, 0]) #dL/C11", "0 for key in stress: stress[key][mask < 0.5] = np.nan for key in", "S[0,0] = 1 
S[1,1] = 1 S[2,2] = 1 S[0,1] = -nu S[0,2]", "np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i in range(3,4): for j", "= 100.0 b = 100.0 S = np.zeros((6,6)) #The elastic matrix for isotropic", "1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry) S = ttx.S.in_units('GPa^-1') stress = {}", "+ (1-nu)/2*(a/b)**4)*(b**2/12-Y**2)) stress['yy'] = E/(g*R**2) * (b**2/12-Y**2 + ((1+nu)/2 + 5*(b/a)**2 + (1-nu)/2*(b/a)**4)*(a**2/12-X**2))", "S = ttx.S.in_units('GPa^-1') thickness = 0.1 Rx = 1000.0 Ry = 500.0 R", "Ry = 500.0 R = np.sqrt(Rx*Ry) S = ttx.S.in_units('GPa^-1') stress = {} strain", "C20, C02, C22, C31, C13, C40, C04, lambda] A = [] A.append([12*S[5,5], -12*S[1,5],", "stress['yx'] = stress['xy'] #shorthand notation uzzaux1 = (S[2,0]+S[2,1])*L**2/4 uzzaux2 = 2*(S[2,0]+S[2,1]) uzzaux3 =", "aripekka \"\"\" import sys import os.path import numpy as np sys.path.insert(1, os.path.join(os.path.dirname(__file__),'..')) from", "1/(2*R**2*(3*(S[0,0]+S[1,1])+2*S[0,1]+S[5,5])) stress['xx'] = D*(L**2/4-X**2-3*Y**2) stress['yy'] = D*(L**2/4-3*X**2-Y**2) stress['xy'] = 2*D*X*Y stress['yx'] = stress['xy']", "-(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), S[1,1]*a**2*b**2/3 + 3*S[5,5]*a**4/20, (S[0,1]/3 + S[5,5]/12)*a**2*b**2, -3*S[1,5]*a**4/20, - S[0,5]*a**2*b**2/12, -2*S[1,5]])", "range(3,4): strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE REFERENCE TO THE IMPLEMENTATION stress_imp, strain_imp, P_imp", "np.nan for k in strain: strain[k][X**2+Y**2 > L**2/4] = np.nan stress_imp, strain_imp, P_imp", "S/E stress_iso, strain_iso, P_iso = isotropic_circular(Rx, Ry, L, thickness, nu, E) stress_aniso, strain_aniso,", "= X*0 strain['yz'] = X*0 strain['zy'] = X*0 for k in stress: stress[k][X**2+Y**2", "0, 0, 2*S[0,1] + S[5,5], -2*S[1,5], -2*S[0,5], S[1,1], S[0,0], 0]) A = np.array(A)", "Ry = 500.0 R = np.sqrt(Rx*Ry) L = 100.0 x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) stress =", "((1-nu)*L**2/4-(1-3*nu)*Y**2-(3-nu)*X**2)/(16*R**2) strain['xy'] = (1+nu)/(8*R**2)*X*Y strain['yx'] = strain['xy'] strain['zz'] = nu/(4*R**2)*(X**2+Y**2-L**2/8) #missing zero strains", "stress[int2char_ind[i]+int2char_ind[j]] for i in range(1,4): for j in range(1,4): strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] #COMPARE", "np.isnan(strain_imp['zz'](X,Y))))) #check the contact force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) <", "= -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_isotropic_circular(): E", "through solving a linear system A*x = b B = np.array([0,0,0,0,0,0,0,0,-0.5]) #Construction of", "S[0,5]*a**2*b**2/12, -2*S[1,5]]) #dL/C31 A.append([S[5,5]*b**2, -S[1,5]*b**2, -S[0,5]*b**2, -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), (S[0,1]/3 + S[5,5]/12)*a**2*b**2, S[0,0]*a**2*b**2/3", "meps, np.logical_and(np.isnan(strain['zz']), np.isnan(strain_imp['zz'](X,Y))))) #check the contact force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P -", "np.isnan(stress_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i in [1,3]:", "== 1: continue num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert 
np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y))", "-S[0,5]*a**2*b**2/12, -3*S[0,5]*b**4/20, S[0,1]*a**2*b**2/12, 3*S[0,0]*b**4/20, S[0,0]]) #dL/C04 A.append([0, 0, 0, 2*S[0,1] + S[5,5], -2*S[1,5],", "for isotropic crystal S[0,0] = 1 S[1,1] = 1 S[2,2] = 1 S[0,1]", "np.isnan(strain_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain_iso[str_ind](X,Y) - strain_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain_iso[str_ind](X,Y)), np.isnan(strain_aniso[str_ind](X,Y))))) #Check contact forces assert", "S[0,1]*b**2, 0]) #dL/C20 A.append([-(S[0,5]*a**2 + S[1,5]*b**2), S[0,1]*a**2 + S[1,1]*b**2, S[0,0]*a**2 + S[0,1]*b**2, (S[0,1]+2*S[5,5])*a**2*b**2/6", "Ry, a, b, thickness, nu, E) meps = np.finfo(np.float).eps #machine epsilon for i", "= strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE REFERENCE TO THE IMPLEMENTATION stress_imp, strain_imp, P_imp = isotropic_circular(Rx,", "= D*(uzzaux1 - (uzzaux2+uzzaux3*np.cos(2*phi+beta))*r_squared) #In sbcalc, there's incorrectly sin instead of cos for", "stress_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress_iso[str_ind](X,Y)), np.isnan(stress_aniso[str_ind](X,Y))))) #Check strains for i in range(1,4): for j", "np.logical_and(np.isnan(strain['zz']), np.isnan(strain_imp['zz'](X,Y))))) #check the contact force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P - P_imp(X,Y))", "+ S[1,1]*b**2), -S[1,5]*a**2, -S[1,5]*b**2, S[1,1]*a**2, S[0,1]*b**2, 0]) #dL/C20 A.append([-(S[0,5]*a**2 + S[1,5]*b**2), S[0,1]*a**2 +", "strain['xy'] = (1+nu)/(8*R**2)*X*Y strain['yx'] = strain['xy'] strain['zz'] = nu/(4*R**2)*(X**2+Y**2-L**2/8) #missing zero strains from", "x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) r_squared = X**2+Y**2 phi = np.arctan2(Y,X) stress = {} strain =", "= X**2+Y**2 phi = np.arctan2(Y,X) stress = {} strain = {} D =", "X**2+Y**2 phi = np.arctan2(Y,X) stress = {} strain = {} D = 1/(2*R**2*(3*(S[0,0]+S[1,1])+2*S[0,1]+S[5,5]))", "THE REFERENCE TO THE IMPLEMENTATION stress_imp, strain_imp, P_imp = isotropic_circular(Rx, Ry, L, thickness,", "assert np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_isotropic_rectangular(): #Calculate the reference", "- P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_isotropic_rectangular(): #Calculate the reference stresses and", "meps, np.logical_and(np.isnan(strain_iso[str_ind](X,Y)), np.isnan(strain_aniso[str_ind](X,Y))))) #Check contact forces assert np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)),", "9 00:09:00 2020 @author: aripekka \"\"\" import sys import os.path import numpy as", "#dL/C13 A.append([-S[1,5]*a**2, S[1,1]*a**2, S[0,1]*a**2, S[1,1]*a**2*b**2/12 + 3*S[0,1]*a**4/20, -3*S[1,5]*a**4/20, -S[1,5]*a**2*b**2/12, 3*S[1,1]*a**4/20, S[0,1]*a**2*b**2/12, S[1,1]]) #dL/C40", "S[0,0], 0]) A = np.array(A) C11, C20, C02, C22, C31, C13, C40, C04,", "C11, C20, C02, C22, C31, C13, C40, C04, L = np.linalg.solve(A,B) stress['xx'] =", "in range(1,4): for j in range(1,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert", "strain['zz'] = S[2,0]*stress['xx'] + S[2,1]*stress['yy'] + S[2,5]*stress['xy'] strain['xz'] = 0.5*(S[3,0]*stress['xx'] + S[3,1]*stress['yy'] +", "in stress: stress[key][mask < 0.5] = np.nan for key in strain: strain[key][mask <", "0.5] = np.nan stress_imp, strain_imp, P_imp = anisotropic_rectangular(Rx, Ry, a, b, 
thickness, S)", "system A*x = b B = np.array([0,0,0,0,0,0,0,0,-0.5]) #Construction of A matrix rows #[C11,", "meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_sbcalc(): #For compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,1], thickness=Quantity(0.1,'mm')) S", "for key in stress: stress[key][mask < 0.5] = np.nan for key in strain:", "S) meps = np.finfo(np.float).eps #machine epsilon #add int indexing int2char_ind = ['','x','y','z'] for", "- stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain['zz'] - strain_imp['zz'](X,Y)) < meps, np.logical_and(np.isnan(strain['zz']),", "- stress_aniso[num_ind](X,Y)) < meps, np.logical_and(np.isnan(stress_iso[num_ind](X,Y)), np.isnan(stress_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress_iso[str_ind](X,Y) - stress_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress_iso[str_ind](X,Y)),", "nu, E) meps = np.finfo(np.float).eps #machine epsilon for i in range(1,3): for j", "< meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_rectangular_vs_isotropic_rectangular(): E = 165 nu = 0.22 thickness", "np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i in [1,3]: for j in [1,3]: if i==1 and", "epsilon for i in range(1,3): for j in range(1,3): num_ind = i*10+j str_ind", "['','x','y','z'] meps = np.finfo(np.float).eps #machine epsilon #Check stresses for i in range(1,3): for", "and j == 1: continue num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind]", "stress_imp, strain_imp, P_imp = isotropic_rectangular(Rx, Ry, a, b, thickness, nu, E) meps =", "L = np.linalg.solve(A,B) stress['xx'] = (C02 + C22*X**2 + 2*C13*X*Y + C04*Y**2)/R**2 stress['yy']", "= strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE REFERENCE TO THE IMPLEMENTATION stress_imp, strain_imp, P_imp = isotropic_rectangular(Rx,", "= np.nan stress[k][np.abs(Y) > b/2] = np.nan for k in strain: strain[k][np.abs(X) >", "-S[1,5]*a**2, -S[1,5]*b**2, S[1,1]*a**2, S[0,1]*b**2, 0]) #dL/C20 A.append([-(S[0,5]*a**2 + S[1,5]*b**2), S[0,1]*a**2 + S[1,1]*b**2, S[0,0]*a**2", "P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_anisotropic_rectangular_vs_old_Version(): #For compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,3],", "{} strain = {} #Numerical solution through solving a linear system A*x =", "#dL/C11 A.append([-12*S[0,5], 12*S[0,1], 12*S[0,0], (S[0,0]*a**2 + S[0,1]*b**2), -S[0,5]*a**2, -S[0,5]*b**2, S[0,1]*a**2, S[0,0]*b**2, 0]) #dL/C02", "= 2*(1+nu) S[5,5] = 2*(1+nu) S = S/E stress_iso, strain_iso, P_iso = isotropic_rectangular(Rx,", "stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in range(3,4): for j in range(3,4): strain[i*10+j] =", "k in strain: strain[k][X**2+Y**2 > L**2/4] = np.nan #add int indexing int2char_ind =", "R = np.sqrt(Rx*Ry) S = ttx.S.in_units('GPa^-1') stress = {} strain = {} #Numerical", "in range(1,4): strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE REFERENCE TO THE IMPLEMENTATION stress_imp, strain_imp,", "stress['yy'] = (C20 + C22*Y**2 + 2*C31*X*Y + C40*X**2)/R**2 stress['xy'] = -(C11 +", "strain['zx'] = strain['xz'].copy() #Apply mask mask = np.ones(X.shape) mask[np.abs(X)>a/2] = 0 mask[np.abs(Y)>b/2] =", "2*(1+nu) S[4,4] = 2*(1+nu) S[5,5] = 2*(1+nu) S = S/E stress_iso, strain_iso, P_iso", "stress = {} strain = 
{} g = 8 + 10*((a/b)**2+(b/a)**2) + (1-nu)*((a/b)**2-(b/a)**2)**2", "instead of cos for k in stress: stress[k][X**2+Y**2 > L**2/4] = np.nan for", "= {} g = 8 + 10*((a/b)**2+(b/a)**2) + (1-nu)*((a/b)**2-(b/a)**2)**2 stress['xx'] = E/(g*R**2) *", "np.isnan(stress_imp[str_ind](X,Y))))) for i in range(1,4): for j in range(1,4): num_ind = i*10+j str_ind", "Ry, L, thickness, nu, E) meps = np.finfo(np.float).eps #machine epsilon for i in", "100.0 b = 100.0 S = np.zeros((6,6)) #The elastic matrix for isotropic crystal", "0.5*(S[3,0]*stress['xx'] + S[3,1]*stress['yy'] + S[3,5]*stress['xy'] ) strain['zx'] = strain['xz'].copy() #Apply mask mask =", "i in [1,3]: for j in [1,3]: if i==1 and j == 1:", "thickness, nu, E) meps = np.finfo(np.float).eps #machine epsilon for i in range(1,3): for", "= D*(L**2/4-X**2-3*Y**2) stress['yy'] = D*(L**2/4-3*X**2-Y**2) stress['xy'] = 2*D*X*Y stress['yx'] = stress['xy'] #shorthand notation", "strain['zx'] = X*0 strain['yz'] = X*0 strain['zy'] = X*0 for k in stress:", "= b B = np.array([0,0,0,0,0,0,0,0,-0.5]) #Construction of A matrix rows #[C11, C20, C02,", "0]) A = np.array(A) C11, C20, C02, C22, C31, C13, C40, C04, L", "strain_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain_iso[str_ind](X,Y)), np.isnan(strain_aniso[str_ind](X,Y))))) #Check contact forces assert np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) <", "= 0.5*(S[3,0]*stress['xx'] + S[3,1]*stress['yy'] + S[3,5]*stress['xy'] ) strain['zx'] = strain['xz'].copy() #Apply mask mask", "stresses for i in range(1,3): for j in range(1,3): num_ind = i*10+j str_ind", "strain[k][np.abs(X) > a/2] = np.nan strain[k][np.abs(Y) > b/2] = np.nan #add int indexing", "REFERENCE TO THE IMPLEMENTATION stress_imp, strain_imp, P_imp = isotropic_circular(Rx, Ry, L, thickness, nu,", "C31, C13, C40, C04, lambda] A = [] A.append([12*S[5,5], -12*S[1,5], -12*S[0,5], -(S[0,5]*a**2 +", "+ C31*X**2 + C13*Y**2)/R**2 stress['yx'] = stress['xy'].copy() strain['zz'] = S[2,0]*stress['xx'] + S[2,1]*stress['yy'] +", "pyTTE import TTcrystal, Quantity def test_isotropic_circular(): #Calculate the reference stresses and strains as", "thickness = 0.1 Rx = 1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry) S", "sin instead of cos for k in stress: stress[k][X**2+Y**2 > L**2/4] = np.nan", "b**2/12)) for k in stress: stress[k][np.abs(X) > a/2] = np.nan stress[k][np.abs(Y) > b/2]", "- stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) for i in range(3,4): for j in", "= np.sqrt(Rx*Ry) L = 100.0 x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) stress = {} strain = {}", "+ 5*S[0,5]*a**2*b**2/12), (S[0,1]/3 + S[5,5]/12)*a**2*b**2, S[0,0]*a**2*b**2/3 + 3*S[5,5]*b**4/20, -S[1,5]*a**2*b**2/12, -3*S[0,5]*b**4/20, -2*S[0,5]]) #dL/C13 A.append([-S[1,5]*a**2,", "500.0 R = np.sqrt(Rx*Ry) L = 100.0 #Calculate the stresses and strains as", "R = np.sqrt(Rx*Ry) L = 100.0 #Calculate the stresses and strains as in", "-3*S[1,5]*a**4/20, -S[1,5]*a**2*b**2/12, 3*S[1,1]*a**4/20, S[0,1]*a**2*b**2/12, S[1,1]]) #dL/C40 A.append([-S[0,5]*b**2, S[0,1]*b**2, S[0,0]*b**2, S[0,0]*a**2*b**2/12 + 3*S[0,1]*b**4/20, -S[0,5]*a**2*b**2/12,", "np.ones(X.shape) mask[np.abs(X)>a/2] = 0 mask[np.abs(Y)>b/2] = 0 for key in stress: stress[key][mask <", "strain_imp, P_imp = anisotropic_circular(Rx, Ry, L, thickness, S) meps = np.finfo(np.float).eps #machine epsilon", "range(3,4): for j in range(3,4): strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE REFERENCE TO THE", "tbcalc.transverse_deformation import * from 
pyTTE import TTcrystal, Quantity def test_isotropic_circular(): #Calculate the reference", "+ S[1,1]*a**2*b**2/12, 3*S[0,1]*b**4/20 + S[0,0]*a**2*b**2/12, 2*S[0,1] + S[5,5]]) #dL/C22 A.append([S[5,5]*a**2, -S[1,5]*a**2, -S[0,5]*a**2, -(3*S[0,5]*a**4/20", "sys.path.insert(1, os.path.join(os.path.dirname(__file__),'..')) from tbcalc.transverse_deformation import * from pyTTE import TTcrystal, Quantity def test_isotropic_circular():", "= 0.1 Rx = 1000.0 Ry = 500.0 a = 100.0 b =", "-2*S[0,5], S[1,1], S[0,0], 0]) A = np.array(A) C11, C20, C02, C22, C31, C13,", "- stress_imp[num_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[num_ind]), np.isnan(stress_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]),", "linear system A*x = b B = np.array([0,0,0,0,0,0,0,0,-0.5]) #Construction of A matrix rows", "= 0.22 thickness = 0.1 Rx = 1000.0 Ry = 500.0 R =", "- strain_imp['zz'](X,Y)) < meps, np.logical_and(np.isnan(strain['zz']), np.isnan(strain_imp['zz'](X,Y))))) #check the contact force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry)", "3*Y**2 -L**2/4) stress['yy'] = -E/(16*R**2)*(3*X**2 + Y**2 -L**2/4) stress['xy'] = E/(8*R**2)*X*Y stress['yx'] =", "{} #Numerical solution through solving a linear system A*x = b B =", "stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in range(1,4): for j in range(1,4): strain[i*10+j] =", "isotropic_circular(Rx, Ry, L, thickness, nu, E) meps = np.finfo(np.float).eps #machine epsilon for i", "= 100.0 x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) stress = {} strain = {} stress['xx'] = -E/(16*R**2)*(X**2", "= S/E stress_iso, strain_iso, P_iso = isotropic_rectangular(Rx, Ry, a, b, thickness, nu, E)", "A matrix rows #[C11, C20, C02, C22, C31, C13, C40, C04, lambda] A", "= anisotropic_circular(Rx, Ry, L, thickness, S) x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps #machine epsilon", "#dL/C02 A.append([-12*S[1,5], 12*S[1,1], 12*S[1,0], (S[1,0]*a**2 + S[1,1]*b**2), -S[1,5]*a**2, -S[1,5]*b**2, S[1,1]*a**2, S[0,1]*b**2, 0]) #dL/C20", "indexing int2char_ind = ['','x','y','z'] for i in range(1,3): for j in range(1,3): stress[i*10+j]", "S[0,0]*a**2*b**2/12 + 3*S[0,1]*b**4/20, -S[0,5]*a**2*b**2/12, -3*S[0,5]*b**4/20, S[0,1]*a**2*b**2/12, 3*S[0,0]*b**4/20, S[0,0]]) #dL/C04 A.append([0, 0, 0, 2*S[0,1]", "in range(1,3): for j in range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in [1,3]:", "stress[k][np.abs(Y) > b/2] = np.nan for k in strain: strain[k][np.abs(X) > a/2] =", "C22*Y**2 + 2*C31*X*Y + C40*X**2)/R**2 stress['xy'] = -(C11 + 2*C22*X*Y + C31*X**2 +", "2*S[0,1] + S[5,5], -2*S[1,5], -2*S[0,5], S[1,1], S[0,0], 0]) A = np.array(A) C11, C20,", "stress_imp, strain_imp, P_imp = anisotropic_circular(Rx, Ry, L, thickness, S) meps = np.finfo(np.float).eps #machine", "-L**2/4) stress['yy'] = -E/(16*R**2)*(3*X**2 + Y**2 -L**2/4) stress['xy'] = E/(8*R**2)*X*Y stress['yx'] = stress['xy']", "in range(1,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain_iso[num_ind](X,Y) - strain_aniso[num_ind](X,Y)) <", "strain_aniso[num_ind](X,Y)) < meps, np.logical_and(np.isnan(strain_iso[num_ind](X,Y)), np.isnan(strain_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain_iso[str_ind](X,Y) - strain_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain_iso[str_ind](X,Y)), np.isnan(strain_aniso[str_ind](X,Y)))))", "3*S[0,1]*a**4/20, -3*S[1,5]*a**4/20, 
-S[1,5]*a**2*b**2/12, 3*S[1,1]*a**4/20, S[0,1]*a**2*b**2/12, S[1,1]]) #dL/C40 A.append([-S[0,5]*b**2, S[0,1]*b**2, S[0,0]*b**2, S[0,0]*a**2*b**2/12 + 3*S[0,1]*b**4/20,", "< meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_isotropic_rectangular(): #Calculate the reference stresses and strains as", "meps = np.finfo(np.float).eps #machine epsilon int2char_ind = ['','x','y','z'] meps = np.finfo(np.float).eps #machine epsilon", "= S[2,0]*stress['xx'] + S[2,1]*stress['yy'] + S[2,5]*stress['xy'] strain['xz'] = 0.5*(S[3,0]*stress['xx'] + S[3,1]*stress['yy'] + S[3,5]*stress['xy']", "mask = np.ones(X.shape) mask[np.abs(X)>a/2] = 0 mask[np.abs(Y)>b/2] = 0 for key in stress:", "deprecated sbcalc x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) r_squared = X**2+Y**2 phi = np.arctan2(Y,X) stress = {}", "S[0,1] = -nu S[0,2] = -nu S[1,2] = -nu S[1,0] = -nu S[2,0]", "= -(C11 + 2*C22*X*Y + C31*X**2 + C13*Y**2)/R**2 stress['yx'] = stress['xy'].copy() strain['zz'] =", "i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain_iso[num_ind](X,Y) - strain_aniso[num_ind](X,Y)) < meps, np.logical_and(np.isnan(strain_iso[num_ind](X,Y)), np.isnan(strain_aniso[num_ind](X,Y))))) assert", "key in stress: stress[key][mask < 0.5] = np.nan for key in strain: strain[key][mask", "def test_anisotropic_circular_vs_sbcalc(): #For compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,1], thickness=Quantity(0.1,'mm')) S = ttx.S.in_units('GPa^-1') thickness", "= 1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry) a = 100.0 b =", "(C20 + C22*Y**2 + 2*C31*X*Y + C40*X**2)/R**2 stress['xy'] = -(C11 + 2*C22*X*Y +", "* (b**2/12-Y**2 + ((1+nu)/2 + 5*(b/a)**2 + (1-nu)/2*(b/a)**4)*(a**2/12-X**2)) #sbcalc has a typo on", "P_aniso = anisotropic_circular(Rx, Ry, L, thickness, S) x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps #machine", "-E/(16*R**2)*(X**2 + 3*Y**2 -L**2/4) stress['yy'] = -E/(16*R**2)*(3*X**2 + Y**2 -L**2/4) stress['xy'] = E/(8*R**2)*X*Y", "np.nan for k in strain: strain[k][X**2+Y**2 > L**2/4] = np.nan #add int indexing", "2*C31*X*Y + C40*X**2)/R**2 stress['xy'] = -(C11 + 2*C22*X*Y + C31*X**2 + C13*Y**2)/R**2 stress['yx']", "0.1 Rx = 1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry) L = 100.0", "= 100.0 b = 100.0 x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) thickness = 0.1 Rx = 1000.0", "{} stress['xx'] = -E/(16*R**2)*(X**2 + 3*Y**2 -L**2/4) stress['yy'] = -E/(16*R**2)*(3*X**2 + Y**2 -L**2/4)", "on Sat May 9 00:09:00 2020 @author: aripekka \"\"\" import sys import os.path", "mask[np.abs(X)>a/2] = 0 mask[np.abs(Y)>b/2] = 0 for key in stress: stress[key][mask < 0.5]", "j == 1: continue num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind] -", "= 2*(1+nu) S[5,5] = 2*(1+nu) S = S/E stress_iso, strain_iso, P_iso = isotropic_circular(Rx,", "stress['xy'] = 2*E/(g*R**2)*X*Y stress['yx'] = stress['xy'] strain['zz'] = nu/(g*R**2) * (((3+nu)/2+5*(b/a)**2+(1-nu)/2*(b/a)**4)*(X**2 - a**2/12)+\\", "a = 100.0 b = 100.0 S = np.zeros((6,6)) #The elastic matrix for", "= {} stress['xx'] = -E/(16*R**2)*(X**2 + 3*Y**2 -L**2/4) stress['yy'] = -E/(16*R**2)*(3*X**2 + Y**2", "TTcrystal, Quantity def test_isotropic_circular(): #Calculate the reference stresses and strains as implemented in", "of cos for k in stress: stress[k][X**2+Y**2 > L**2/4] = np.nan for k", "range(1,3): for j in range(1,3): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress_iso[num_ind](X,Y)", "= 
np.sqrt(Rx*Ry) S = ttx.S.in_units('GPa^-1') stress = {} strain = {} #Numerical solution", "solving a linear system A*x = b B = np.array([0,0,0,0,0,0,0,0,-0.5]) #Construction of A", "1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry) L = 100.0 x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) stress", "S[0,0]*b**2, S[0,0]*a**2*b**2/12 + 3*S[0,1]*b**4/20, -S[0,5]*a**2*b**2/12, -3*S[0,5]*b**4/20, S[0,1]*a**2*b**2/12, 3*S[0,0]*b**4/20, S[0,0]]) #dL/C04 A.append([0, 0, 0,", "= TTcrystal(crystal='Si',hkl=[9,5,3], thickness=Quantity(0.1,'mm')) a = 100.0 b = 100.0 x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) thickness =", "1 S[2,2] = 1 S[0,1] = -nu S[0,2] = -nu S[1,2] = -nu", "range(1,3): for j in range(1,3): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress[num_ind]", "#For compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,1], thickness=Quantity(0.1,'mm')) S = ttx.S.in_units('GPa^-1') thickness = 0.1", "np.array([0,0,0,0,0,0,0,0,-0.5]) #Construction of A matrix rows #[C11, C20, C02, C22, C31, C13, C40,", "stress_imp[num_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[num_ind]), np.isnan(stress_imp[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y)))))", "0.1 Rx = 1000.0 Ry = 500.0 a = 100.0 b = 100.0", "3*S[0,1]*b**4/20 + S[0,0]*a**2*b**2/12, 2*S[0,1] + S[5,5]]) #dL/C22 A.append([S[5,5]*a**2, -S[1,5]*a**2, -S[0,5]*a**2, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12),", "#Construction of A matrix rows #[C11, C20, C02, C22, C31, C13, C40, C04,", "for j in range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in range(3,4): for j", "j in range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in range(3,4): for j in", "TTcrystal(crystal='Si',hkl=[9,5,3], thickness=Quantity(0.1,'mm')) a = 100.0 b = 100.0 x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) thickness = 0.1", "np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps, np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y))))) def test_anisotropic_rectangular_vs_old_Version(): #For compliance matrix ttx", "stress['xx'] = -E/(16*R**2)*(X**2 + 3*Y**2 -L**2/4) stress['yy'] = -E/(16*R**2)*(3*X**2 + Y**2 -L**2/4) stress['xy']", "thickness, S) meps = np.finfo(np.float).eps #machine epsilon #add int indexing int2char_ind = ['','x','y','z']", "S[5,5], -2*S[1,5], -2*S[0,5], S[1,1], S[0,0], 0]) A = np.array(A) C11, C20, C02, C22,", "Ry, a, b, thickness, S) #add int indexing int2char_ind = ['','x','y','z'] for i", "meps = np.finfo(np.float).eps #machine epsilon #Check stresses for i in range(1,3): for j", "S[1,1] = 1 S[2,2] = 1 S[0,1] = -nu S[0,2] = -nu S[1,2]", "= ['','x','y','z'] meps = np.finfo(np.float).eps #machine epsilon #Check stresses for i in range(1,3):", "a, b, thickness, S) x=np.linspace(-a/2,a/2,150) X,Y=np.meshgrid(x,x) meps = np.finfo(np.float).eps #machine epsilon int2char_ind =", "X,Y=np.meshgrid(x,x) thickness = 0.1 Rx = 1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry)", "stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in [1,3]: for j in [1,3]: if i==1", "for j in range(3,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain[num_ind] -", "= isotropic_circular(Rx, Ry, L, thickness, nu, E) stress_aniso, strain_aniso, P_aniso = anisotropic_circular(Rx, Ry,", "in range(3,4): num_ind = i*10+j str_ind = int2char_ind[i]+int2char_ind[j] assert 
np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) <", "= -E/(16*R**2)*(X**2 + 3*Y**2 -L**2/4) stress['yy'] = -E/(16*R**2)*(3*X**2 + Y**2 -L**2/4) stress['xy'] =", "< 0.5] = np.nan stress_imp, strain_imp, P_imp = anisotropic_rectangular(Rx, Ry, a, b, thickness,", "stress['xx'] = (C02 + C22*X**2 + 2*C13*X*Y + C04*Y**2)/R**2 stress['yy'] = (C20 +", "12*S[1,0], (S[1,0]*a**2 + S[1,1]*b**2), -S[1,5]*a**2, -S[1,5]*b**2, S[1,1]*a**2, S[0,1]*b**2, 0]) #dL/C20 A.append([-(S[0,5]*a**2 + S[1,5]*b**2),", "test_anisotropic_circular_vs_sbcalc(): #For compliance matrix ttx = TTcrystal(crystal='Si',hkl=[9,5,1], thickness=Quantity(0.1,'mm')) S = ttx.S.in_units('GPa^-1') thickness =", "= 500.0 a = 100.0 b = 100.0 S = np.zeros((6,6)) #The elastic", "+ S[3,5]*stress['xy'] ) strain['zx'] = strain['xz'].copy() #Apply mask mask = np.ones(X.shape) mask[np.abs(X)>a/2] =", "np.all(np.logical_or(np.abs(stress_iso[num_ind](X,Y) - stress_aniso[num_ind](X,Y)) < meps, np.logical_and(np.isnan(stress_iso[num_ind](X,Y)), np.isnan(stress_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress_iso[str_ind](X,Y) - stress_aniso[str_ind](X,Y)) < meps,", "E) meps = np.finfo(np.float).eps #machine epsilon for i in range(1,3): for j in", "P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_isotropic_circular(): E = 165 nu = 0.22", "stress['xy'] strain['xx'] = ((1-nu)*L**2/4-(1-3*nu)*X**2-(3-nu)*Y**2)/(16*R**2) strain['yy'] = ((1-nu)*L**2/4-(1-3*nu)*Y**2-(3-nu)*X**2)/(16*R**2) strain['xy'] = (1+nu)/(8*R**2)*X*Y strain['yx'] = strain['xy']", "A.append([S[5,5]*b**2, -S[1,5]*b**2, -S[0,5]*b**2, -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), (S[0,1]/3 + S[5,5]/12)*a**2*b**2, S[0,0]*a**2*b**2/3 + 3*S[5,5]*b**4/20, -S[1,5]*a**2*b**2/12,", "strain_iso, P_iso = isotropic_circular(Rx, Ry, L, thickness, nu, E) stress_aniso, strain_aniso, P_aniso =", "np.all(np.logical_or(np.abs(strain[str_ind] - strain_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(strain[str_ind]), np.isnan(strain_imp[str_ind](X,Y))))) #check the contact force P =", "= strain['xz'].copy() #Apply mask mask = np.ones(X.shape) mask[np.abs(X)>a/2] = 0 mask[np.abs(Y)>b/2] = 0", "= 1 S[0,1] = -nu S[0,2] = -nu S[1,2] = -nu S[1,0] =", "str_ind = int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(strain_iso[num_ind](X,Y) - strain_aniso[num_ind](X,Y)) < meps, np.logical_and(np.isnan(strain_iso[num_ind](X,Y)), np.isnan(strain_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain_iso[str_ind](X,Y)", "2*E/(g*R**2)*X*Y stress['yx'] = stress['xy'] strain['zz'] = nu/(g*R**2) * (((3+nu)/2+5*(b/a)**2+(1-nu)/2*(b/a)**4)*(X**2 - a**2/12)+\\ ((3+nu)/2+5*(a/b)**2+(1-nu)/2*(a/b)**4)*(Y**2 -", "int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain['zz'] - strain_imp['zz'](X,Y))", "np.isnan(strain_imp[str_ind](X,Y))))) #check the contact force P = -thickness*(stress['xx']/Rx+stress['yy']/Ry) assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) <", "500.0 a = 100.0 b = 100.0 S = np.zeros((6,6)) #The elastic matrix", "2*C22*X*Y + C31*X**2 + C13*Y**2)/R**2 stress['yx'] = stress['xy'].copy() strain['zz'] = S[2,0]*stress['xx'] + S[2,1]*stress['yy']", "< meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain['zz'] - strain_imp['zz'](X,Y)) 
< meps, np.logical_and(np.isnan(strain['zz']), np.isnan(strain_imp['zz'](X,Y))))) #check", "epsilon #Check stresses for i in range(1,3): for j in range(1,3): num_ind =", "+ 3*S[0,1]*a**4/20, -3*S[1,5]*a**4/20, -S[1,5]*a**2*b**2/12, 3*S[1,1]*a**4/20, S[0,1]*a**2*b**2/12, S[1,1]]) #dL/C40 A.append([-S[0,5]*b**2, S[0,1]*b**2, S[0,0]*b**2, S[0,0]*a**2*b**2/12 +", "0.1 Rx = 1000.0 Ry = 500.0 R = np.sqrt(Rx*Ry) S = ttx.S.in_units('GPa^-1')", "+ 3*S[5,5]*b**4/20, -S[1,5]*a**2*b**2/12, -3*S[0,5]*b**4/20, -2*S[0,5]]) #dL/C13 A.append([-S[1,5]*a**2, S[1,1]*a**2, S[0,1]*a**2, S[1,1]*a**2*b**2/12 + 3*S[0,1]*a**4/20, -3*S[1,5]*a**4/20,", "assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_isotropic_circular(): E = 165", "for j in range(1,4): strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]] #COMPARE THE REFERENCE TO THE IMPLEMENTATION", "numpy as np sys.path.insert(1, os.path.join(os.path.dirname(__file__),'..')) from tbcalc.transverse_deformation import * from pyTTE import TTcrystal,", "meps, np.logical_and(np.isnan(stress_iso[num_ind](X,Y)), np.isnan(stress_aniso[num_ind](X,Y))))) assert np.all(np.logical_or(np.abs(stress_iso[str_ind](X,Y) - stress_aniso[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress_iso[str_ind](X,Y)), np.isnan(stress_aniso[str_ind](X,Y))))) #Check strains", "b, thickness, nu, E) meps = np.finfo(np.float).eps #machine epsilon for i in range(1,3):", "X*0 strain['yz'] = X*0 strain['zy'] = X*0 for k in stress: stress[k][X**2+Y**2 >", "May 9 00:09:00 2020 @author: aripekka \"\"\" import sys import os.path import numpy", "= -nu S[0,2] = -nu S[1,2] = -nu S[1,0] = -nu S[2,0] =", "((1+nu)/2 + 5*(a/b)**2 + (1-nu)/2*(a/b)**4)*(b**2/12-Y**2)) stress['yy'] = E/(g*R**2) * (b**2/12-Y**2 + ((1+nu)/2 +", "S[3,5]*stress['xy'] ) strain['zx'] = strain['xz'].copy() #Apply mask mask = np.ones(X.shape) mask[np.abs(X)>a/2] = 0", "= isotropic_rectangular(Rx, Ry, a, b, thickness, nu, E) stress_aniso, strain_aniso, P_aniso = anisotropic_rectangular(Rx,", "> b/2] = np.nan for k in strain: strain[k][np.abs(X) > a/2] = np.nan", "+ 3*Y**2 -L**2/4) stress['yy'] = -E/(16*R**2)*(3*X**2 + Y**2 -L**2/4) stress['xy'] = E/(8*R**2)*X*Y stress['yx']", "12*S[0,0], (S[0,0]*a**2 + S[0,1]*b**2), -S[0,5]*a**2, -S[0,5]*b**2, S[0,1]*a**2, S[0,0]*b**2, 0]) #dL/C02 A.append([-12*S[1,5], 12*S[1,1], 12*S[1,0],", "= 1 S[2,2] = 1 S[0,1] = -nu S[0,2] = -nu S[1,2] =", "< meps, np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y))))) def test_anisotropic_circular_vs_isotropic_circular(): E = 165 nu = 0.22 thickness", "in the now deprecated sbcalc x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) r_squared = X**2+Y**2 phi = np.arctan2(Y,X)", "#dL/C40 A.append([-S[0,5]*b**2, S[0,1]*b**2, S[0,0]*b**2, S[0,0]*a**2*b**2/12 + 3*S[0,1]*b**4/20, -S[0,5]*a**2*b**2/12, -3*S[0,5]*b**4/20, S[0,1]*a**2*b**2/12, 3*S[0,0]*b**4/20, S[0,0]]) #dL/C04", "3*S[0,0]*b**4/20, S[0,0]]) #dL/C04 A.append([0, 0, 0, 2*S[0,1] + S[5,5], -2*S[1,5], -2*S[0,5], S[1,1], S[0,0],", "strain_iso, P_iso = isotropic_rectangular(Rx, Ry, a, b, thickness, nu, E) stress_aniso, strain_aniso, P_aniso", "j in range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in [1,3]: for j in", "C13, C40, C04, L = np.linalg.solve(A,B) stress['xx'] = (C02 + C22*X**2 + 2*C13*X*Y", "E/(g*R**2) * (a**2/12-X**2 + ((1+nu)/2 + 5*(a/b)**2 + (1-nu)/2*(a/b)**4)*(b**2/12-Y**2)) stress['yy'] = E/(g*R**2) *", "in range(1,3): str_ind = int2char_ind[i]+int2char_ind[j] assert 
np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y)))))", "+ S[2,5]*stress['xy'] strain['xz'] = 0.5*(S[3,0]*stress['xx'] + S[3,1]*stress['yy'] + S[3,5]*stress['xy'] ) strain['zx'] = strain['xz'].copy()", "(S[2,0]+S[2,1])*L**2/4 uzzaux2 = 2*(S[2,0]+S[2,1]) uzzaux3 = np.sqrt((S[2,1]-S[2,0])**2+S[2,5]**2) beta = np.arctan2(S[2,5],(S[2,1]-S[2,0])) strain['zz'] = D*(uzzaux1", "S[5,5]*a**2, S[5,5]*b**2, -S[1,5]*a**2, -S[0,5]*b**2, 0]) #dL/C11 A.append([-12*S[0,5], 12*S[0,1], 12*S[0,0], (S[0,0]*a**2 + S[0,1]*b**2), -S[0,5]*a**2,", "strain['yx'] = strain['xy'] strain['zz'] = nu/(4*R**2)*(X**2+Y**2-L**2/8) #missing zero strains from sbcalc strain['xz'] =", "P_imp = isotropic_rectangular(Rx, Ry, a, b, thickness, nu, E) meps = np.finfo(np.float).eps #machine", "-S[1,5]*a**2*b**2/12, 3*S[1,1]*a**4/20, S[0,1]*a**2*b**2/12, S[1,1]]) #dL/C40 A.append([-S[0,5]*b**2, S[0,1]*b**2, S[0,0]*b**2, S[0,0]*a**2*b**2/12 + 3*S[0,1]*b**4/20, -S[0,5]*a**2*b**2/12, -3*S[0,5]*b**4/20,", "i in range(1,3): for j in range(1,3): stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]] for i in", "= int2char_ind[i]+int2char_ind[j] assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps, np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y))))) assert np.all(np.logical_or(np.abs(strain['zz'] -", "-S[0,5]*b**2, S[0,1]*a**2, S[0,0]*b**2, 0]) #dL/C02 A.append([-12*S[1,5], 12*S[1,1], 12*S[1,0], (S[1,0]*a**2 + S[1,1]*b**2), -S[1,5]*a**2, -S[1,5]*b**2,", "strain[key][mask < 0.5] = np.nan stress_imp, strain_imp, P_imp = anisotropic_rectangular(Rx, Ry, a, b,", "anisotropic_rectangular(Rx, Ry, a, b, thickness, S) #add int indexing int2char_ind = ['','x','y','z'] for", "strain['xz'] = X*0 strain['zx'] = X*0 strain['yz'] = X*0 strain['zy'] = X*0 for", "= 2*(1+nu) S = S/E stress_iso, strain_iso, P_iso = isotropic_circular(Rx, Ry, L, thickness,", "and strains as in the now deprecated sbcalc x=np.linspace(-L/2,L/2,150) X,Y=np.meshgrid(x,x) r_squared = X**2+Y**2" ]
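# Convenience entry point (an addition, not part of the original test module):
# a minimal sketch that lets this file be executed directly, assuming pytest is
# installed; running the module then behaves like invoking pytest on it.
if __name__ == '__main__':
    import pytest
    pytest.main([__file__])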
[ "can re-load quickly. \"\"\" vec_file = join(self.dir, word_vec_name + self.WORD_VEC_SUFFIX + \".npy\") if", "return [] with open(file, \"rb\") as f: return pickle.load(f) def __getstate__(self): state =", "WARNING: this includes dev words This exists since loading word-vecs each time we", "dir = join(CORPUS_DIR, self.NAME) if not exists(dir) or not isdir(dir): raise ValueError(\"No directory", "self.paragraphs[-1] def __repr__(self) -> str: return f\"Title: {self.title}. Number of paragraphs: {len(self.paragraphs)}\" class", "SquadParagraph, distractors: List[SquadParagraph]): super().__init__(question_id, question, answers, paragraph) # self.distractors = [x.get_paragraph_without_text_pickling() for x", "return RelevanceQuestion(dataset_name='squad', question_id=squad_question.question_id, question_tokens=squad_question.question, supporting_facts=[squad_question.paragraph.par_text], distractors=[x.par_text for x in squad_question.distractors]) class SquadRelevanceCorpus(Configurable): TRAIN_DOC_FILE", "leaving out answer spans. When we want to predict answers, we will deal", "def __getstate__(self): state = self.__dict__.copy() state['train_title_to_document'] = None state['dev_title_to_document'] = None return state", "evidence(self): return None def get_vocab_file(self): self.get_vocab() return join(self.dir, self.VOCAB_FILE) def get_vocab(self): \"\"\" get", "self.TRAIN_DOC_FILE)) else: if self.dev_title_to_document is None: self.dev_title_to_document = self._load(join(self.dir, self.DEV_DOC_FILE)) def _insert_text_to_paragraph(self, paragraph:", "data. For now, leaving out answer spans. When we want to predict answers,", "List[SquadDocument], dev: List[SquadQuestionWithDistractors]): dir = join(CORPUS_DIR, SquadRelevanceCorpus.NAME) # if isfile(dir) or (exists(dir) and", "questions: self._insert_text_to_question(q, train) def get_train(self) -> List[SquadQuestionWithDistractors]: questions = self._load(join(self.dir, self.TRAIN_FILE)) self._populate_questions(questions, train=True)", "bool): title_to_doc = self.train_title_to_document if train else self.dev_title_to_document paragraph.par_text = title_to_doc[paragraph.doc_title].get_par(paragraph.par_id).par_text paragraph.pickle_text =", "\"train_questions.pkl\" DEV_DOC_FILE = \"dev_documents.pkl\" DEV_FILE = \"dev_questions.pkl\" NAME = \"squad\" VOCAB_FILE = \"squad_vocab.txt\"", "return [x.rstrip() for x in f] else: voc = set() for fn in", "if isfile(vec_file): print(\"Loading word vec %s for %s from cache\" % (word_vec_name, self.name))", "import CORPUS_DIR from hotpot.configurable import Configurable from hotpot.data_handling.data import RelevanceQuestion from hotpot.data_handling.word_vectors import", "SquadQuestionWithDistractors, train: bool): for par in [question.paragraph] + question.distractors: self._insert_text_to_paragraph(par, train) def _populate_questions(self,", "(self.name, word_vec_name)) voc = self.get_vocab() vecs = load_word_vectors(word_vec_name, voc) with open(vec_file, \"wb\") as", "self._load(join(self.dir, self.DEV_FILE)) self._populate_questions(questions, train=False) return questions def get_test(self) -> List[SquadQuestionWithDistractors]: return [] def", "to the case-insensitive vocab of this corpus. 
WARNING: this includes dev words This", "self.paragraphs = paragraphs self.id_to_par = self._build_id_paragraph_dict() def _build_id_paragraph_dict(self): return {x.par_id: x for x", "cache the pruned vecs on-disk as a .npy file we can re-load quickly.", "[] def _load(self, file): if not exists(file): return [] with open(file, \"rb\") as", "None: with open(join(dir, name), 'wb') as f: pickle.dump(data, f) def __init__(self): dir =", "TRAIN_DOC_FILE = \"train_documents.pkl\" TRAIN_FILE = \"train_questions.pkl\" DEV_DOC_FILE = \"dev_documents.pkl\" DEV_FILE = \"dev_questions.pkl\" NAME", "CORPUS_DIR from hotpot.configurable import Configurable from hotpot.data_handling.data import RelevanceQuestion from hotpot.data_handling.word_vectors import load_word_vectors", "in self.distractors): continue # self.distractors.append(paragraph.get_paragraph_without_text_pickling()) self.distractors.append(paragraph) def squad_question_to_relevance_question(squad_question: SquadQuestionWithDistractors) -> RelevanceQuestion: return RelevanceQuestion(dataset_name='squad',", "ValueError(\"No directory %s, corpus not built yet?\" % dir) self.dir = dir self.train_title_to_document", "self.name)) with open(vec_file, \"rb\") as f: return pickle.load(f) else: print(\"Building pruned word vec", "id already exists in this document!\") if par.doc_title != self.title: raise ValueError(\"Paragraph title", "and x.doc_title == paragraph.doc_title) for x in self.distractors): continue # self.distractors.append(paragraph.get_paragraph_without_text_pickling()) self.distractors.append(paragraph) def", "question: List[str], answers: Set[str], paragraph: SquadParagraph): self.question_id = question_id self.question = question self.answers", "includes train/dev/test files \"\"\" voc_file = join(self.dir, self.VOCAB_FILE) if exists(voc_file): with open(voc_file, \"r\")", "train else self.dev_title_to_document paragraph.par_text = title_to_doc[paragraph.doc_title].get_par(paragraph.par_id).par_text paragraph.pickle_text = True # So that there", "f: return [x.rstrip() for x in f] else: voc = set() for fn", "x in para.par_text) voc_list = sorted(list(voc)) with open(voc_file, \"w\") as f: for word", "hotpot.config import CORPUS_DIR from hotpot.configurable import Configurable from hotpot.data_handling.data import RelevanceQuestion from hotpot.data_handling.word_vectors", "for doc in train_documents} if len(train_document_dict) != len(train_documents): raise ValueError(\"different train documents have", "answer spans. 
When we want to predict answers, we will deal with it.\"\"\"", "# self.distractors.append(paragraph.get_paragraph_without_text_pickling()) self.distractors.append(paragraph) def squad_question_to_relevance_question(squad_question: SquadQuestionWithDistractors) -> RelevanceQuestion: return RelevanceQuestion(dataset_name='squad', question_id=squad_question.question_id, question_tokens=squad_question.question, supporting_facts=[squad_question.paragraph.par_text],", "Squad Question and paragraphs.\"\"\" def __init__(self, question_id: str, question: List[str], answers: Set[str], paragraph:", "exists, isfile, isdir from os import makedirs, listdir from hotpot.config import CORPUS_DIR from", "\"dev_questions.pkl\" NAME = \"squad\" VOCAB_FILE = \"squad_vocab.txt\" WORD_VEC_SUFFIX = \"_pruned\" @staticmethod def make_corpus(train_documents:", "add_distractors(self, paragraphs: List[SquadParagraph]): \"\"\" Doesn't add duplicates \"\"\" for paragraph in paragraphs: if", "+ [question.paragraph]): voc.update(x.lower() for x in para.par_text) voc_list = sorted(list(voc)) with open(voc_file, \"w\")", "\"\"\" Doesn't add duplicates \"\"\" for paragraph in paragraphs: if any((x.par_id == paragraph.par_id", "= join(self.dir, word_vec_name + self.WORD_VEC_SUFFIX + \".npy\") if isfile(vec_file): print(\"Loading word vec %s", "in [self.get_train, self.get_dev, self.get_test]: for question in fn(): voc.update(x.lower() for x in question.question)", "= paragraphs self.id_to_par = self._build_id_paragraph_dict() def _build_id_paragraph_dict(self): return {x.par_id: x for x in", "be a big pain, so we cache the pruned vecs on-disk as a", "print(\"Building pruned word vec %s for %s\" % (self.name, word_vec_name)) voc = self.get_vocab()", "ResourceLoader \"\"\" Squad data. For now, leaving out answer spans. 
When we want", "words This exists since loading word-vecs each time we startup can be a", "SquadRelevanceCorpus.NAME) # if isfile(dir) or (exists(dir) and len(listdir(dir))) > 0: # raise ValueError(\"Directory", "title: str, paragraphs: List[SquadParagraph]): self.title = title self.paragraphs = paragraphs self.id_to_par = self._build_id_paragraph_dict()", "title!\") dev_document_dict = {doc.title: doc for doc in dev_documents} if len(dev_document_dict) != len(dev_documents):", "return vecs def get_resource_loader(self): return ResourceLoader(self.get_pruned_word_vecs) def _load_document_dict(self, train: bool): if train: if", "\"_pruned\" @staticmethod def make_corpus(train_documents: List[SquadDocument], train: List[SquadQuestionWithDistractors], dev_documents: List[SquadDocument], dev: List[SquadQuestionWithDistractors]): dir =", "in para.par_text) voc_list = sorted(list(voc)) with open(voc_file, \"w\") as f: for word in", "as f: for word in voc_list: f.write(word) f.write(\"\\n\") return voc_list def get_pruned_word_vecs(self, word_vec_name,", "voc_list: f.write(word) f.write(\"\\n\") return voc_list def get_pruned_word_vecs(self, word_vec_name, voc=None): \"\"\" Loads word vectors", "+ \".npy\") if isfile(vec_file): print(\"Loading word vec %s for %s from cache\" %", "def __init__(self, doc_title: str, par_id: int, par_text: List[str], pickle_text=True): self.doc_title = doc_title self.par_id", "in questions: self._insert_text_to_question(q, train) def get_train(self) -> List[SquadQuestionWithDistractors]: questions = self._load(join(self.dir, self.TRAIN_FILE)) self._populate_questions(questions,", "f: pickle.dump(data, f) def __init__(self): dir = join(CORPUS_DIR, self.NAME) if not exists(dir) or", "duplicates \"\"\" for paragraph in paragraphs: if any((x.par_id == paragraph.par_id and x.doc_title ==", "self.__dict__.copy() state['par_text'] = None return state return self.__dict__ class SquadDocument(object): def __init__(self, title:", "distractors] self.distractors = distractors def add_distractors(self, paragraphs: List[SquadParagraph]): \"\"\" Doesn't add duplicates \"\"\"", "question in fn(): voc.update(x.lower() for x in question.question) for para in (question.distractors +", "we will deal with it.\"\"\" class SquadParagraph(object): def __init__(self, doc_title: str, par_id: int,", "self.pickle_text: state = self.__dict__.copy() state['par_text'] = None return state return self.__dict__ class SquadDocument(object):", "Question and paragraphs.\"\"\" def __init__(self, question_id: str, question: List[str], answers: Set[str], paragraph: SquadParagraph):", "str, question: List[str], answers: Set[str], paragraph: SquadParagraph, distractors: List[SquadParagraph]): super().__init__(question_id, question, answers, paragraph)", "bool): self._load_document_dict(train) for q in questions: self._insert_text_to_question(q, train) def get_train(self) -> List[SquadQuestionWithDistractors]: questions", "for %s from cache\" % (word_vec_name, self.name)) with open(vec_file, \"rb\") as f: return", "for question in fn(): voc.update(x.lower() for x in question.question) for para in (question.distractors", "and paragraphs.\"\"\" def __init__(self, question_id: str, question: List[str], answers: Set[str], paragraph: SquadParagraph): self.question_id", "in this document!\") if par.doc_title != self.title: raise ValueError(\"Paragraph title not matching document", "we want to predict answers, we will deal with it.\"\"\" class SquadParagraph(object): def", "voc_list def get_pruned_word_vecs(self, 
class SquadParagraph(object):
    def __init__(self, doc_title: str, par_id: int, par_text: List[str], pickle_text=True):
        self.doc_title = doc_title
        self.par_id = par_id
        self.par_text = par_text
        self.pickle_text = pickle_text

    @property
    def num_tokens(self):
        return len(self.par_text)

    def get_paragraph_without_text_pickling(self):
        return SquadParagraph(self.doc_title, self.par_id, self.par_text, pickle_text=False)

    def __repr__(self) -> str:
        return f"Title: {self.doc_title}, Id: {self.par_id}\n" \
               f"Paragraph:\n" + ' '.join(self.par_text)

    def __getstate__(self):
        if not self.pickle_text:
            state = self.__dict__.copy()
            state['par_text'] = None
            return state
        return self.__dict__
\"\"\" vec_file = join(self.dir, word_vec_name + self.WORD_VEC_SUFFIX + \".npy\") if isfile(vec_file):", "\"\"\" vec_file = join(self.dir, word_vec_name + self.WORD_VEC_SUFFIX + \".npy\") if isfile(vec_file): print(\"Loading word", "return None def get_vocab_file(self): self.get_vocab() return join(self.dir, self.VOCAB_FILE) def get_vocab(self): \"\"\" get all-lower", "get_resource_loader(self): return ResourceLoader(self.get_pruned_word_vecs) def _load_document_dict(self, train: bool): if train: if self.train_title_to_document is None:", "Set from os.path import join, exists, isfile, isdir from os import makedirs, listdir", "since loading word-vecs each time we startup can be a big pain, so", "List[str], pickle_text=True): self.doc_title = doc_title self.par_id = par_id self.par_text = par_text self.pickle_text =", "predict answers, we will deal with it.\"\"\" class SquadParagraph(object): def __init__(self, doc_title: str,", "= self._build_id_paragraph_dict() def _build_id_paragraph_dict(self): return {x.par_id: x for x in self.paragraphs} def get_par(self,", "for paragraph in paragraphs: if any((x.par_id == paragraph.par_id and x.doc_title == paragraph.doc_title) for", "None @property def evidence(self): return None def get_vocab_file(self): self.get_vocab() return join(self.dir, self.VOCAB_FILE) def", "we can re-load quickly. \"\"\" vec_file = join(self.dir, word_vec_name + self.WORD_VEC_SUFFIX + \".npy\")", "it.\"\"\" class SquadParagraph(object): def __init__(self, doc_title: str, par_id: int, par_text: List[str], pickle_text=True): self.doc_title", "dir = join(CORPUS_DIR, SquadRelevanceCorpus.NAME) # if isfile(dir) or (exists(dir) and len(listdir(dir))) > 0:", "When we want to predict answers, we will deal with it.\"\"\" class SquadParagraph(object):", "voc_list = sorted(list(voc)) with open(voc_file, \"w\") as f: for word in voc_list: f.write(word)", "x in distractors] self.distractors = distractors def add_distractors(self, paragraphs: List[SquadParagraph]): \"\"\" Doesn't add", "if train: if self.train_title_to_document is None: self.train_title_to_document = self._load(join(self.dir, self.TRAIN_DOC_FILE)) else: if self.dev_title_to_document", "SquadParagraph, train: bool): title_to_doc = self.train_title_to_document if train else self.dev_title_to_document paragraph.par_text = title_to_doc[paragraph.doc_title].get_par(paragraph.par_id).par_text", "import ResourceLoader \"\"\" Squad data. For now, leaving out answer spans. 
When we", "if self.dev_title_to_document is None: self.dev_title_to_document = self._load(join(self.dir, self.DEV_DOC_FILE)) def _insert_text_to_paragraph(self, paragraph: SquadParagraph, train:", "pickle from typing import List, Set from os.path import join, exists, isfile, isdir", "-> str: return f\"Title: {self.doc_title}, Id: {self.par_id}\\n\" \\ f\"Paragraph:\\n\" + ' '.join(self.par_text) def", "or (exists(dir) and len(listdir(dir))) > 0: # raise ValueError(\"Directory %s already exists and", "= \"dev_questions.pkl\" NAME = \"squad\" VOCAB_FILE = \"squad_vocab.txt\" WORD_VEC_SUFFIX = \"_pruned\" @staticmethod def", "import List, Set from os.path import join, exists, isfile, isdir from os import", "self._insert_text_to_question(q, train) def get_train(self) -> List[SquadQuestionWithDistractors]: questions = self._load(join(self.dir, self.TRAIN_FILE)) self._populate_questions(questions, train=True) return", "str: return f\"{self.question_id}: {' '.join(self.question)}\\nAnswer(s): {self.answers}\\n\" \\ f\"Paragraph:\\n\" + ' '.join(self.paragraph.par_text) class SquadQuestionWithDistractors(SquadQuestion):", "open(join(dir, name), 'wb') as f: pickle.dump(data, f) def __init__(self): dir = join(CORPUS_DIR, self.NAME)", "self.VOCAB_FILE) def get_vocab(self): \"\"\" get all-lower cased unique words for this corpus, includes", "(SquadRelevanceCorpus.TRAIN_DOC_FILE, train_document_dict), (SquadRelevanceCorpus.DEV_DOC_FILE, dev_document_dict)]: if data is not None: with open(join(dir, name), 'wb')", "as f: pickle.dump(data, f) def __init__(self): dir = join(CORPUS_DIR, self.NAME) if not exists(dir)", "self._load(join(self.dir, self.DEV_DOC_FILE)) def _insert_text_to_paragraph(self, paragraph: SquadParagraph, train: bool): title_to_doc = self.train_title_to_document if train", "paragraph in paragraphs: if any((x.par_id == paragraph.par_id and x.doc_title == paragraph.doc_title) for x", "if not self.pickle_text: state = self.__dict__.copy() state['par_text'] = None return state return self.__dict__", "self.par_id = par_id self.par_text = par_text self.pickle_text = pickle_text @property def num_tokens(self): return", "\"w\") as f: for word in voc_list: f.write(word) f.write(\"\\n\") return voc_list def get_pruned_word_vecs(self,", "paragraph) # self.distractors = [x.get_paragraph_without_text_pickling() for x in distractors] self.distractors = distractors def", "[(SquadRelevanceCorpus.TRAIN_FILE, train), (SquadRelevanceCorpus.DEV_FILE, dev), (SquadRelevanceCorpus.TRAIN_DOC_FILE, train_document_dict), (SquadRelevanceCorpus.DEV_DOC_FILE, dev_document_dict)]: if data is not None:", "paragraph: SquadParagraph, distractors: List[SquadParagraph]): super().__init__(question_id, question, answers, paragraph) # self.distractors = [x.get_paragraph_without_text_pickling() for", "with open(vec_file, \"wb\") as f: pickle.dump(vecs, f) return vecs def get_resource_loader(self): return ResourceLoader(self.get_pruned_word_vecs)", "[question.paragraph] + question.distractors: self._insert_text_to_paragraph(par, train) def _populate_questions(self, questions: List[SquadQuestionWithDistractors], train: bool): self._load_document_dict(train) for", "paragraph: SquadParagraph, train: bool): title_to_doc = self.train_title_to_document if train else self.dev_title_to_document paragraph.par_text =", "documents have the same title!\") for name, data in [(SquadRelevanceCorpus.TRAIN_FILE, train), (SquadRelevanceCorpus.DEV_FILE, dev),", "import Configurable from hotpot.data_handling.data import RelevanceQuestion from 
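
# Minimal sketch (hypothetical data): SquadDocument indexes paragraphs by id,
# and add_par re-wraps incoming paragraphs with pickle_text=True so the
# document store always serializes the full text.
def _example_document_lookup():
    doc = SquadDocument("Oxygen", [])
    doc.add_par(SquadParagraph("Oxygen", 0, ["First", "paragraph"], pickle_text=False))
    assert doc.get_par(0).par_text == ["First", "paragraph"]
    assert doc.get_par(0).pickle_text is True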

class SquadQuestion(object):
    """ Squad Question and paragraphs."""

    def __init__(self, question_id: str, question: List[str], answers: Set[str],
                 paragraph: SquadParagraph):
        self.question_id = question_id
        self.question = question
        self.answers = answers
        self.paragraph = paragraph  # .get_paragraph_without_text_pickling()

    def __repr__(self) -> str:
        return f"{self.question_id}: {' '.join(self.question)}\nAnswer(s): {self.answers}\n" \
               f"Paragraph:\n" + ' '.join(self.paragraph.par_text)


class SquadQuestionWithDistractors(SquadQuestion):
    def __init__(self, question_id: str, question: List[str], answers: Set[str],
                 paragraph: SquadParagraph, distractors: List[SquadParagraph]):
        super().__init__(question_id, question, answers, paragraph)
        # self.distractors = [x.get_paragraph_without_text_pickling() for x in distractors]
        self.distractors = distractors

    def add_distractors(self, paragraphs: List[SquadParagraph]):
        """ Doesn't add duplicates """
        for paragraph in paragraphs:
            if any((x.par_id == paragraph.par_id and x.doc_title == paragraph.doc_title)
                   for x in self.distractors):
                continue
            # self.distractors.append(paragraph.get_paragraph_without_text_pickling())
            self.distractors.append(paragraph)

def squad_question_to_relevance_question(squad_question: SquadQuestionWithDistractors) -> RelevanceQuestion:
    return RelevanceQuestion(dataset_name='squad',
                             question_id=squad_question.question_id,
                             question_tokens=squad_question.question,
                             supporting_facts=[squad_question.paragraph.par_text],
                             distractors=[x.par_text for x in squad_question.distractors])
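
# Minimal sketch (hypothetical data): converting a SQuAD question with
# distractors into the generic RelevanceQuestion consumed elsewhere in hotpot.
def _example_relevance_conversion():
    gold = SquadParagraph("Oxygen", 0, ["Oxygen", "is", "an", "element"])
    distractor = SquadParagraph("Iron", 3, ["Iron", "is", "a", "metal"])
    question = SquadQuestionWithDistractors("q1", ["What", "is", "oxygen", "?"],
                                            {"an element"}, gold, [distractor])
    return squad_question_to_relevance_question(question)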

class SquadRelevanceCorpus(Configurable):
    TRAIN_DOC_FILE = "train_documents.pkl"
    TRAIN_FILE = "train_questions.pkl"
    DEV_DOC_FILE = "dev_documents.pkl"
    DEV_FILE = "dev_questions.pkl"
    NAME = "squad"
    VOCAB_FILE = "squad_vocab.txt"
    WORD_VEC_SUFFIX = "_pruned"

    @staticmethod
    def make_corpus(train_documents: List[SquadDocument],
                    train: List[SquadQuestionWithDistractors],
                    dev_documents: List[SquadDocument],
                    dev: List[SquadQuestionWithDistractors]):
        dir = join(CORPUS_DIR, SquadRelevanceCorpus.NAME)
        # if isfile(dir) or (exists(dir) and len(listdir(dir)) > 0):
        #     raise ValueError("Directory %s already exists and is non-empty" % dir)
        if not exists(dir):
            makedirs(dir)
        train_document_dict = {doc.title: doc for doc in train_documents}
        if len(train_document_dict) != len(train_documents):
            raise ValueError("different train documents have the same title!")
        dev_document_dict = {doc.title: doc for doc in dev_documents}
        if len(dev_document_dict) != len(dev_documents):
            raise ValueError("different dev documents have the same title!")
        for name, data in [(SquadRelevanceCorpus.TRAIN_FILE, train),
                           (SquadRelevanceCorpus.DEV_FILE, dev),
                           (SquadRelevanceCorpus.TRAIN_DOC_FILE, train_document_dict),
                           (SquadRelevanceCorpus.DEV_DOC_FILE, dev_document_dict)]:
            if data is not None:
                with open(join(dir, name), 'wb') as f:
                    pickle.dump(data, f)

    def __init__(self):
        dir = join(CORPUS_DIR, self.NAME)
        if not exists(dir) or not isdir(dir):
            raise ValueError("No directory %s, corpus not built yet?" % dir)
        self.dir = dir
        self.train_title_to_document = None
        self.dev_title_to_document = None

    @property
    def evidence(self):
        return None

    def get_vocab_file(self):
        self.get_vocab()
        return join(self.dir, self.VOCAB_FILE)

    def get_vocab(self):
        """ get all-lower cased unique words for this corpus, includes train/dev/test files """
        voc_file = join(self.dir, self.VOCAB_FILE)
        if exists(voc_file):
            with open(voc_file, "r") as f:
                return [x.rstrip() for x in f]
        else:
            voc = set()
            for fn in [self.get_train, self.get_dev, self.get_test]:
                for question in fn():
                    voc.update(x.lower() for x in question.question)
                    for para in (question.distractors + [question.paragraph]):
                        voc.update(x.lower() for x in para.par_text)
            voc_list = sorted(list(voc))
            with open(voc_file, "w") as f:
                for word in voc_list:
                    f.write(word)
                    f.write("\n")
            return voc_list
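
    # Cache layout note (illustrative): squad_vocab.txt stores one lower-cased
    # token per line, and get_pruned_word_vecs below writes the pruned vectors
    # next to it as "<word_vec_name>_pruned.npy". Despite the .npy suffix the
    # file is written with pickle.dump, matching the pickle.load on the read
    # path, so repeated runs avoid re-reading the full embedding file.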
    def get_pruned_word_vecs(self, word_vec_name, voc=None):
        """
        Loads word vectors that have been pruned to the case-insensitive vocab of this corpus.
        WARNING: this includes dev words

        This exists since loading word-vecs each time we startup can be a big pain, so
        we cache the pruned vecs on-disk as a .npy file we can re-load quickly.
        """
        vec_file = join(self.dir, word_vec_name + self.WORD_VEC_SUFFIX + ".npy")
        if isfile(vec_file):
            print("Loading word vec %s for %s from cache" % (word_vec_name, self.name))
            with open(vec_file, "rb") as f:
                return pickle.load(f)
        else:
            print("Building pruned word vec %s for %s" % (word_vec_name, self.name))
            voc = self.get_vocab()
            vecs = load_word_vectors(word_vec_name, voc)
            with open(vec_file, "wb") as f:
                pickle.dump(vecs, f)
            return vecs

    def get_resource_loader(self):
        return ResourceLoader(self.get_pruned_word_vecs)

    def _load_document_dict(self, train: bool):
        if train:
            if self.train_title_to_document is None:
                self.train_title_to_document = self._load(join(self.dir, self.TRAIN_DOC_FILE))
        else:
            if self.dev_title_to_document is None:
                self.dev_title_to_document = self._load(join(self.dir, self.DEV_DOC_FILE))

    def _insert_text_to_paragraph(self, paragraph: SquadParagraph, train: bool):
        title_to_doc = self.train_title_to_document if train else self.dev_title_to_document
        paragraph.par_text = title_to_doc[paragraph.doc_title].get_par(paragraph.par_id).par_text
        paragraph.pickle_text = True  # So that there will be no problems later

    def _insert_text_to_question(self, question: SquadQuestionWithDistractors, train: bool):
        for par in [question.paragraph] + question.distractors:
            self._insert_text_to_paragraph(par, train)

    def _populate_questions(self, questions: List[SquadQuestionWithDistractors], train: bool):
        self._load_document_dict(train)
        for q in questions:
            self._insert_text_to_question(q, train)
    def get_train(self) -> List[SquadQuestionWithDistractors]:
        questions = self._load(join(self.dir, self.TRAIN_FILE))
        self._populate_questions(questions, train=True)
        return questions

    def get_dev(self) -> List[SquadQuestionWithDistractors]:
        questions = self._load(join(self.dir, self.DEV_FILE))
        self._populate_questions(questions, train=False)
        return questions

    def get_test(self) -> List[SquadQuestionWithDistractors]:
        return []

    def _load(self, file):
        if not exists(file):
            return []
        with open(file, "rb") as f:
            return pickle.load(f)

    def __getstate__(self):
        state = self.__dict__.copy()
        state['train_title_to_document'] = None
        state['dev_title_to_document'] = None
        return state

    def __setstate__(self, state):
        self.__dict__ = state  # document caches were dropped; they reload lazily
self.pickle_text = pickle_text @property def num_tokens(self): return len(self.par_text) def get_paragraph_without_text_pickling(self): return SquadParagraph(self.doc_title,", "title self.paragraphs = paragraphs self.id_to_par = self._build_id_paragraph_dict() def _build_id_paragraph_dict(self): return {x.par_id: x for", "self.get_vocab() return join(self.dir, self.VOCAB_FILE) def get_vocab(self): \"\"\" get all-lower cased unique words for", "self.dev_title_to_document is None: self.dev_title_to_document = self._load(join(self.dir, self.DEV_DOC_FILE)) def _insert_text_to_paragraph(self, paragraph: SquadParagraph, train: bool):", "'.join(self.paragraph.par_text) class SquadQuestionWithDistractors(SquadQuestion): def __init__(self, question_id: str, question: List[str], answers: Set[str], paragraph: SquadParagraph,", "not exists(file): return [] with open(file, \"rb\") as f: return pickle.load(f) def __getstate__(self):", "makedirs, listdir from hotpot.config import CORPUS_DIR from hotpot.configurable import Configurable from hotpot.data_handling.data import", "dir) self.dir = dir self.train_title_to_document = None self.dev_title_to_document = None @property def evidence(self):", "For now, leaving out answer spans. When we want to predict answers, we", "return ResourceLoader(self.get_pruned_word_vecs) def _load_document_dict(self, train: bool): if train: if self.train_title_to_document is None: self.train_title_to_document", "return state return self.__dict__ class SquadDocument(object): def __init__(self, title: str, paragraphs: List[SquadParagraph]): self.title", "self.WORD_VEC_SUFFIX + \".npy\") if isfile(vec_file): print(\"Loading word vec %s for %s from cache\"", "load_word_vectors(word_vec_name, voc) with open(vec_file, \"wb\") as f: pickle.dump(vecs, f) return vecs def get_resource_loader(self):", "else self.dev_title_to_document paragraph.par_text = title_to_doc[paragraph.doc_title].get_par(paragraph.par_id).par_text paragraph.pickle_text = True # So that there will", "in [question.paragraph] + question.distractors: self._insert_text_to_paragraph(par, train) def _populate_questions(self, questions: List[SquadQuestionWithDistractors], train: bool): self._load_document_dict(train)", "dir) if not exists(dir): makedirs(dir) train_document_dict = {doc.title: doc for doc in train_documents}", "= title self.paragraphs = paragraphs self.id_to_par = self._build_id_paragraph_dict() def _build_id_paragraph_dict(self): return {x.par_id: x", "bool): if train: if self.train_title_to_document is None: self.train_title_to_document = self._load(join(self.dir, self.TRAIN_DOC_FILE)) else: if", "%s, corpus not built yet?\" % dir) self.dir = dir self.train_title_to_document = None", "ResourceLoader(self.get_pruned_word_vecs) def _load_document_dict(self, train: bool): if train: if self.train_title_to_document is None: self.train_title_to_document =", "pickle.load(f) def __getstate__(self): state = self.__dict__.copy() state['train_title_to_document'] = None state['dev_title_to_document'] = None return", "+ question.distractors: self._insert_text_to_paragraph(par, train) def _populate_questions(self, questions: List[SquadQuestionWithDistractors], train: bool): self._load_document_dict(train) for q", "__repr__(self) -> str: return f\"Title: {self.doc_title}, Id: {self.par_id}\\n\" \\ f\"Paragraph:\\n\" + ' '.join(self.par_text)", "question.question) for para in (question.distractors + [question.paragraph]): voc.update(x.lower() for x in para.par_text) voc_list", "f\"{self.question_id}: {' 
'.join(self.question)}\\nAnswer(s): {self.answers}\\n\" \\ f\"Paragraph:\\n\" + ' '.join(self.paragraph.par_text) class SquadQuestionWithDistractors(SquadQuestion): def __init__(self,", "-> List[SquadQuestionWithDistractors]: questions = self._load(join(self.dir, self.TRAIN_FILE)) self._populate_questions(questions, train=True) return questions def get_dev(self) ->", "List[str], answers: Set[str], paragraph: SquadParagraph, distractors: List[SquadParagraph]): super().__init__(question_id, question, answers, paragraph) # self.distractors", "SquadParagraph(self.doc_title, self.par_id, self.par_text, pickle_text=False) def __repr__(self) -> str: return f\"Title: {self.doc_title}, Id: {self.par_id}\\n\"", "= \"train_documents.pkl\" TRAIN_FILE = \"train_questions.pkl\" DEV_DOC_FILE = \"dev_documents.pkl\" DEV_FILE = \"dev_questions.pkl\" NAME =", "self.title: raise ValueError(\"Paragraph title not matching document title!\") self.paragraphs.append(SquadParagraph(par.doc_title, par.par_id, par.par_text, pickle_text=True)) self.id_to_par[par.par_id]", "open(vec_file, \"rb\") as f: return pickle.load(f) else: print(\"Building pruned word vec %s for", "self.answers = answers self.paragraph = paragraph # .get_paragraph_without_text_pickling() def __repr__(self) -> str: return", "doc_title: str, par_id: int, par_text: List[str], pickle_text=True): self.doc_title = doc_title self.par_id = par_id", "cache\" % (word_vec_name, self.name)) with open(vec_file, \"rb\") as f: return pickle.load(f) else: print(\"Building", "corpus. WARNING: this includes dev words This exists since loading word-vecs each time", "of this corpus. WARNING: this includes dev words This exists since loading word-vecs", "def _load_document_dict(self, train: bool): if train: if self.train_title_to_document is None: self.train_title_to_document = self._load(join(self.dir,", "self._load(join(self.dir, self.TRAIN_DOC_FILE)) else: if self.dev_title_to_document is None: self.dev_title_to_document = self._load(join(self.dir, self.DEV_DOC_FILE)) def _insert_text_to_paragraph(self,", "make_corpus(train_documents: List[SquadDocument], train: List[SquadQuestionWithDistractors], dev_documents: List[SquadDocument], dev: List[SquadQuestionWithDistractors]): dir = join(CORPUS_DIR, SquadRelevanceCorpus.NAME) #", "get all-lower cased unique words for this corpus, includes train/dev/test files \"\"\" voc_file", "get_par(self, par_id) -> SquadParagraph: return self.id_to_par[par_id] def add_par(self, par: SquadParagraph): if par.par_id in", "squad_question.distractors]) class SquadRelevanceCorpus(Configurable): TRAIN_DOC_FILE = \"train_documents.pkl\" TRAIN_FILE = \"train_questions.pkl\" DEV_DOC_FILE = \"dev_documents.pkl\" DEV_FILE", "raise ValueError(\"Paragraph title not matching document title!\") self.paragraphs.append(SquadParagraph(par.doc_title, par.par_id, par.par_text, pickle_text=True)) self.id_to_par[par.par_id] =", "List[SquadQuestionWithDistractors], train: bool): self._load_document_dict(train) for q in questions: self._insert_text_to_question(q, train) def get_train(self) ->", "question, answers, paragraph) # self.distractors = [x.get_paragraph_without_text_pickling() for x in distractors] self.distractors =", "__repr__(self) -> str: return f\"Title: {self.title}. 
Number of paragraphs: {len(self.paragraphs)}\" class SquadQuestion(object): \"\"\"", "question_id self.question = question self.answers = answers self.paragraph = paragraph # .get_paragraph_without_text_pickling() def", "List[SquadQuestionWithDistractors], dev_documents: List[SquadDocument], dev: List[SquadQuestionWithDistractors]): dir = join(CORPUS_DIR, SquadRelevanceCorpus.NAME) # if isfile(dir) or", "dev), (SquadRelevanceCorpus.TRAIN_DOC_FILE, train_document_dict), (SquadRelevanceCorpus.DEV_DOC_FILE, dev_document_dict)]: if data is not None: with open(join(dir, name),", "def get_train(self) -> List[SquadQuestionWithDistractors]: questions = self._load(join(self.dir, self.TRAIN_FILE)) self._populate_questions(questions, train=True) return questions def", "self._populate_questions(questions, train=False) return questions def get_test(self) -> List[SquadQuestionWithDistractors]: return [] def _load(self, file):", "_build_id_paragraph_dict(self): return {x.par_id: x for x in self.paragraphs} def get_par(self, par_id) -> SquadParagraph:", "List[SquadQuestionWithDistractors]: return [] def _load(self, file): if not exists(file): return [] with open(file,", "sorted(list(voc)) with open(voc_file, \"w\") as f: for word in voc_list: f.write(word) f.write(\"\\n\") return", "state['train_title_to_document'] = None state['dev_title_to_document'] = None return state def __setstate__(self, state): self.__dict__ =", "= self.__dict__.copy() state['par_text'] = None return state return self.__dict__ class SquadDocument(object): def __init__(self,", "def get_vocab(self): \"\"\" get all-lower cased unique words for this corpus, includes train/dev/test", "len(train_document_dict) != len(train_documents): raise ValueError(\"different train documents have the same title!\") dev_document_dict =", "distractors: List[SquadParagraph]): super().__init__(question_id, question, answers, paragraph) # self.distractors = [x.get_paragraph_without_text_pickling() for x in", "pickle.load(f) else: print(\"Building pruned word vec %s for %s\" % (self.name, word_vec_name)) voc", "open(vec_file, \"wb\") as f: pickle.dump(vecs, f) return vecs def get_resource_loader(self): return ResourceLoader(self.get_pruned_word_vecs) def", "NAME = \"squad\" VOCAB_FILE = \"squad_vocab.txt\" WORD_VEC_SUFFIX = \"_pruned\" @staticmethod def make_corpus(train_documents: List[SquadDocument],", "__getstate__(self): state = self.__dict__.copy() state['train_title_to_document'] = None state['dev_title_to_document'] = None return state def", "we startup can be a big pain, so we cache the pruned vecs", "isfile(vec_file): print(\"Loading word vec %s for %s from cache\" % (word_vec_name, self.name)) with", "be no problems later def _insert_text_to_question(self, question: SquadQuestionWithDistractors, train: bool): for par in", "as f: return pickle.load(f) def __getstate__(self): state = self.__dict__.copy() state['train_title_to_document'] = None state['dev_title_to_document']", "voc_file = join(self.dir, self.VOCAB_FILE) if exists(voc_file): with open(voc_file, \"r\") as f: return [x.rstrip()", "so we cache the pruned vecs on-disk as a .npy file we can", "def make_corpus(train_documents: List[SquadDocument], train: List[SquadQuestionWithDistractors], dev_documents: List[SquadDocument], dev: List[SquadQuestionWithDistractors]): dir = join(CORPUS_DIR, SquadRelevanceCorpus.NAME)", "return f\"Title: {self.doc_title}, Id: {self.par_id}\\n\" \\ f\"Paragraph:\\n\" + ' '.join(self.par_text) def __getstate__(self): if", "par: SquadParagraph): if par.par_id 
in self.id_to_par: raise ValueError(\"This paragraph id already exists in", "{self.answers}\\n\" \\ f\"Paragraph:\\n\" + ' '.join(self.paragraph.par_text) class SquadQuestionWithDistractors(SquadQuestion): def __init__(self, question_id: str, question:", "# self.distractors = [x.get_paragraph_without_text_pickling() for x in distractors] self.distractors = distractors def add_distractors(self,", "word_vec_name + self.WORD_VEC_SUFFIX + \".npy\") if isfile(vec_file): print(\"Loading word vec %s for %s", "return pickle.load(f) else: print(\"Building pruned word vec %s for %s\" % (self.name, word_vec_name))", "this corpus, includes train/dev/test files \"\"\" voc_file = join(self.dir, self.VOCAB_FILE) if exists(voc_file): with", "if par.doc_title != self.title: raise ValueError(\"Paragraph title not matching document title!\") self.paragraphs.append(SquadParagraph(par.doc_title, par.par_id,", "have been pruned to the case-insensitive vocab of this corpus. WARNING: this includes", "return self.__dict__ class SquadDocument(object): def __init__(self, title: str, paragraphs: List[SquadParagraph]): self.title = title", "_load(self, file): if not exists(file): return [] with open(file, \"rb\") as f: return", "return {x.par_id: x for x in self.paragraphs} def get_par(self, par_id) -> SquadParagraph: return", "-> List[SquadQuestionWithDistractors]: questions = self._load(join(self.dir, self.DEV_FILE)) self._populate_questions(questions, train=False) return questions def get_test(self) ->", "train documents have the same title!\") dev_document_dict = {doc.title: doc for doc in", "[] with open(file, \"rb\") as f: return pickle.load(f) def __getstate__(self): state = self.__dict__.copy()", "document!\") if par.doc_title != self.title: raise ValueError(\"Paragraph title not matching document title!\") self.paragraphs.append(SquadParagraph(par.doc_title,", "f\"Paragraph:\\n\" + ' '.join(self.par_text) def __getstate__(self): if not self.pickle_text: state = self.__dict__.copy() state['par_text']", "this corpus. 
WARNING: this includes dev words This exists since loading word-vecs each", "this includes dev words This exists since loading word-vecs each time we startup", "import RelevanceQuestion from hotpot.data_handling.word_vectors import load_word_vectors from hotpot.utils import ResourceLoader \"\"\" Squad data.", "f] else: voc = set() for fn in [self.get_train, self.get_dev, self.get_test]: for question", "RelevanceQuestion: return RelevanceQuestion(dataset_name='squad', question_id=squad_question.question_id, question_tokens=squad_question.question, supporting_facts=[squad_question.paragraph.par_text], distractors=[x.par_text for x in squad_question.distractors]) class SquadRelevanceCorpus(Configurable):", "Loads word vectors that have been pruned to the case-insensitive vocab of this", "corpus not built yet?\" % dir) self.dir = dir self.train_title_to_document = None self.dev_title_to_document", "in distractors] self.distractors = distractors def add_distractors(self, paragraphs: List[SquadParagraph]): \"\"\" Doesn't add duplicates", "# raise ValueError(\"Directory %s already exists and is non-empty\" % dir) if not", ".get_paragraph_without_text_pickling() def __repr__(self) -> str: return f\"{self.question_id}: {' '.join(self.question)}\\nAnswer(s): {self.answers}\\n\" \\ f\"Paragraph:\\n\" +", "List[SquadDocument], train: List[SquadQuestionWithDistractors], dev_documents: List[SquadDocument], dev: List[SquadQuestionWithDistractors]): dir = join(CORPUS_DIR, SquadRelevanceCorpus.NAME) # if", "get_vocab_file(self): self.get_vocab() return join(self.dir, self.VOCAB_FILE) def get_vocab(self): \"\"\" get all-lower cased unique words", "= pickle_text @property def num_tokens(self): return len(self.par_text) def get_paragraph_without_text_pickling(self): return SquadParagraph(self.doc_title, self.par_id, self.par_text,", "big pain, so we cache the pruned vecs on-disk as a .npy file", "-> List[SquadQuestionWithDistractors]: return [] def _load(self, file): if not exists(file): return [] with", "fn(): voc.update(x.lower() for x in question.question) for para in (question.distractors + [question.paragraph]): voc.update(x.lower()", "isdir(dir): raise ValueError(\"No directory %s, corpus not built yet?\" % dir) self.dir =", "SquadParagraph): if par.par_id in self.id_to_par: raise ValueError(\"This paragraph id already exists in this", "' '.join(self.par_text) def __getstate__(self): if not self.pickle_text: state = self.__dict__.copy() state['par_text'] = None", "with it.\"\"\" class SquadParagraph(object): def __init__(self, doc_title: str, par_id: int, par_text: List[str], pickle_text=True):", "for x in f] else: voc = set() for fn in [self.get_train, self.get_dev,", "not exists(dir): makedirs(dir) train_document_dict = {doc.title: doc for doc in train_documents} if len(train_document_dict)", "(question.distractors + [question.paragraph]): voc.update(x.lower() for x in para.par_text) voc_list = sorted(list(voc)) with open(voc_file,", "distractors=[x.par_text for x in squad_question.distractors]) class SquadRelevanceCorpus(Configurable): TRAIN_DOC_FILE = \"train_documents.pkl\" TRAIN_FILE = \"train_questions.pkl\"", "answers, paragraph) # self.distractors = [x.get_paragraph_without_text_pickling() for x in distractors] self.distractors = distractors", "= \"_pruned\" @staticmethod def make_corpus(train_documents: List[SquadDocument], train: List[SquadQuestionWithDistractors], dev_documents: List[SquadDocument], dev: List[SquadQuestionWithDistractors]): dir", "hotpot.data_handling.word_vectors import 
load_word_vectors from hotpot.utils import ResourceLoader \"\"\" Squad data. For now, leaving", "\\ f\"Paragraph:\\n\" + ' '.join(self.par_text) def __getstate__(self): if not self.pickle_text: state = self.__dict__.copy()", "to predict answers, we will deal with it.\"\"\" class SquadParagraph(object): def __init__(self, doc_title:", "%s\" % (self.name, word_vec_name)) voc = self.get_vocab() vecs = load_word_vectors(word_vec_name, voc) with open(vec_file,", "SquadParagraph(object): def __init__(self, doc_title: str, par_id: int, par_text: List[str], pickle_text=True): self.doc_title = doc_title", "self.distractors.append(paragraph) def squad_question_to_relevance_question(squad_question: SquadQuestionWithDistractors) -> RelevanceQuestion: return RelevanceQuestion(dataset_name='squad', question_id=squad_question.question_id, question_tokens=squad_question.question, supporting_facts=[squad_question.paragraph.par_text], distractors=[x.par_text for", "files \"\"\" voc_file = join(self.dir, self.VOCAB_FILE) if exists(voc_file): with open(voc_file, \"r\") as f:", "= {doc.title: doc for doc in train_documents} if len(train_document_dict) != len(train_documents): raise ValueError(\"different", "__init__(self): dir = join(CORPUS_DIR, self.NAME) if not exists(dir) or not isdir(dir): raise ValueError(\"No", "paragraphs: List[SquadParagraph]): \"\"\" Doesn't add duplicates \"\"\" for paragraph in paragraphs: if any((x.par_id", "self.id_to_par = self._build_id_paragraph_dict() def _build_id_paragraph_dict(self): return {x.par_id: x for x in self.paragraphs} def", "join(self.dir, self.VOCAB_FILE) if exists(voc_file): with open(voc_file, \"r\") as f: return [x.rstrip() for x", "Squad data. For now, leaving out answer spans. When we want to predict", "par_text: List[str], pickle_text=True): self.doc_title = doc_title self.par_id = par_id self.par_text = par_text self.pickle_text", "\"squad\" VOCAB_FILE = \"squad_vocab.txt\" WORD_VEC_SUFFIX = \"_pruned\" @staticmethod def make_corpus(train_documents: List[SquadDocument], train: List[SquadQuestionWithDistractors],", "train=True) return questions def get_dev(self) -> List[SquadQuestionWithDistractors]: questions = self._load(join(self.dir, self.DEV_FILE)) self._populate_questions(questions, train=False)", "int, par_text: List[str], pickle_text=True): self.doc_title = doc_title self.par_id = par_id self.par_text = par_text", "Id: {self.par_id}\\n\" \\ f\"Paragraph:\\n\" + ' '.join(self.par_text) def __getstate__(self): if not self.pickle_text: state", "paragraphs: {len(self.paragraphs)}\" class SquadQuestion(object): \"\"\" Squad Question and paragraphs.\"\"\" def __init__(self, question_id: str,", "word vectors that have been pruned to the case-insensitive vocab of this corpus.", "voc.update(x.lower() for x in para.par_text) voc_list = sorted(list(voc)) with open(voc_file, \"w\") as f:", "self.__dict__ class SquadDocument(object): def __init__(self, title: str, paragraphs: List[SquadParagraph]): self.title = title self.paragraphs", "= answers self.paragraph = paragraph # .get_paragraph_without_text_pickling() def __repr__(self) -> str: return f\"{self.question_id}:", "vocab of this corpus. 
WARNING: this includes dev words This exists since loading", "for word in voc_list: f.write(word) f.write(\"\\n\") return voc_list def get_pruned_word_vecs(self, word_vec_name, voc=None): \"\"\"", "def _insert_text_to_paragraph(self, paragraph: SquadParagraph, train: bool): title_to_doc = self.train_title_to_document if train else self.dev_title_to_document", "as f: return [x.rstrip() for x in f] else: voc = set() for", "data is not None: with open(join(dir, name), 'wb') as f: pickle.dump(data, f) def", "par.doc_title != self.title: raise ValueError(\"Paragraph title not matching document title!\") self.paragraphs.append(SquadParagraph(par.doc_title, par.par_id, par.par_text,", "built yet?\" % dir) self.dir = dir self.train_title_to_document = None self.dev_title_to_document = None", "a .npy file we can re-load quickly. \"\"\" vec_file = join(self.dir, word_vec_name +", "word_vec_name, voc=None): \"\"\" Loads word vectors that have been pruned to the case-insensitive", "par in [question.paragraph] + question.distractors: self._insert_text_to_paragraph(par, train) def _populate_questions(self, questions: List[SquadQuestionWithDistractors], train: bool):", "no problems later def _insert_text_to_question(self, question: SquadQuestionWithDistractors, train: bool): for par in [question.paragraph]", "= self.paragraphs[-1] def __repr__(self) -> str: return f\"Title: {self.title}. Number of paragraphs: {len(self.paragraphs)}\"", "join(CORPUS_DIR, SquadRelevanceCorpus.NAME) # if isfile(dir) or (exists(dir) and len(listdir(dir))) > 0: # raise", "List[SquadParagraph]): self.title = title self.paragraphs = paragraphs self.id_to_par = self._build_id_paragraph_dict() def _build_id_paragraph_dict(self): return", "pruned vecs on-disk as a .npy file we can re-load quickly. 
\"\"\" vec_file", "data in [(SquadRelevanceCorpus.TRAIN_FILE, train), (SquadRelevanceCorpus.DEV_FILE, dev), (SquadRelevanceCorpus.TRAIN_DOC_FILE, train_document_dict), (SquadRelevanceCorpus.DEV_DOC_FILE, dev_document_dict)]: if data is", "bool): for par in [question.paragraph] + question.distractors: self._insert_text_to_paragraph(par, train) def _populate_questions(self, questions: List[SquadQuestionWithDistractors],", "= question_id self.question = question self.answers = answers self.paragraph = paragraph # .get_paragraph_without_text_pickling()", "self.dev_title_to_document paragraph.par_text = title_to_doc[paragraph.doc_title].get_par(paragraph.par_id).par_text paragraph.pickle_text = True # So that there will be", "% dir) self.dir = dir self.train_title_to_document = None self.dev_title_to_document = None @property def", "len(dev_documents): raise ValueError(\"different dev documents have the same title!\") for name, data in", "List[SquadQuestionWithDistractors]): dir = join(CORPUS_DIR, SquadRelevanceCorpus.NAME) # if isfile(dir) or (exists(dir) and len(listdir(dir))) >", "= join(self.dir, self.VOCAB_FILE) if exists(voc_file): with open(voc_file, \"r\") as f: return [x.rstrip() for", "question: List[str], answers: Set[str], paragraph: SquadParagraph, distractors: List[SquadParagraph]): super().__init__(question_id, question, answers, paragraph) #", "self.train_title_to_document = self._load(join(self.dir, self.TRAIN_DOC_FILE)) else: if self.dev_title_to_document is None: self.dev_title_to_document = self._load(join(self.dir, self.DEV_DOC_FILE))", "distractors def add_distractors(self, paragraphs: List[SquadParagraph]): \"\"\" Doesn't add duplicates \"\"\" for paragraph in", "self._populate_questions(questions, train=True) return questions def get_dev(self) -> List[SquadQuestionWithDistractors]: questions = self._load(join(self.dir, self.DEV_FILE)) self._populate_questions(questions,", "x in self.paragraphs} def get_par(self, par_id) -> SquadParagraph: return self.id_to_par[par_id] def add_par(self, par:", "= \"squad\" VOCAB_FILE = \"squad_vocab.txt\" WORD_VEC_SUFFIX = \"_pruned\" @staticmethod def make_corpus(train_documents: List[SquadDocument], train:", "raise ValueError(\"Directory %s already exists and is non-empty\" % dir) if not exists(dir):", "the case-insensitive vocab of this corpus. 
WARNING: this includes dev words This exists", "= {doc.title: doc for doc in dev_documents} if len(dev_document_dict) != len(dev_documents): raise ValueError(\"different", "Configurable from hotpot.data_handling.data import RelevanceQuestion from hotpot.data_handling.word_vectors import load_word_vectors from hotpot.utils import ResourceLoader", "self.get_test]: for question in fn(): voc.update(x.lower() for x in question.question) for para in", "(word_vec_name, self.name)) with open(vec_file, \"rb\") as f: return pickle.load(f) else: print(\"Building pruned word", "makedirs(dir) train_document_dict = {doc.title: doc for doc in train_documents} if len(train_document_dict) != len(train_documents):", "para.par_text) voc_list = sorted(list(voc)) with open(voc_file, \"w\") as f: for word in voc_list:", "voc = set() for fn in [self.get_train, self.get_dev, self.get_test]: for question in fn():", "with open(voc_file, \"r\") as f: return [x.rstrip() for x in f] else: voc", "get_train(self) -> List[SquadQuestionWithDistractors]: questions = self._load(join(self.dir, self.TRAIN_FILE)) self._populate_questions(questions, train=True) return questions def get_dev(self)", "return voc_list def get_pruned_word_vecs(self, word_vec_name, voc=None): \"\"\" Loads word vectors that have been", "time we startup can be a big pain, so we cache the pruned", "self.par_text = par_text self.pickle_text = pickle_text @property def num_tokens(self): return len(self.par_text) def get_paragraph_without_text_pickling(self):", "= True # So that there will be no problems later def _insert_text_to_question(self,", "len(self.par_text) def get_paragraph_without_text_pickling(self): return SquadParagraph(self.doc_title, self.par_id, self.par_text, pickle_text=False) def __repr__(self) -> str: return", "isfile(dir) or (exists(dir) and len(listdir(dir))) > 0: # raise ValueError(\"Directory %s already exists", "question_id: str, question: List[str], answers: Set[str], paragraph: SquadParagraph): self.question_id = question_id self.question =", "questions def get_test(self) -> List[SquadQuestionWithDistractors]: return [] def _load(self, file): if not exists(file):", "List, Set from os.path import join, exists, isfile, isdir from os import makedirs,", "return pickle.load(f) def __getstate__(self): state = self.__dict__.copy() state['train_title_to_document'] = None state['dev_title_to_document'] = None", "and len(listdir(dir))) > 0: # raise ValueError(\"Directory %s already exists and is non-empty\"", "for x in distractors] self.distractors = distractors def add_distractors(self, paragraphs: List[SquadParagraph]): \"\"\" Doesn't", "self.question = question self.answers = answers self.paragraph = paragraph # .get_paragraph_without_text_pickling() def __repr__(self)", "name, data in [(SquadRelevanceCorpus.TRAIN_FILE, train), (SquadRelevanceCorpus.DEV_FILE, dev), (SquadRelevanceCorpus.TRAIN_DOC_FILE, train_document_dict), (SquadRelevanceCorpus.DEV_DOC_FILE, dev_document_dict)]: if data", "None: self.train_title_to_document = self._load(join(self.dir, self.TRAIN_DOC_FILE)) else: if self.dev_title_to_document is None: self.dev_title_to_document = self._load(join(self.dir,", "class SquadQuestionWithDistractors(SquadQuestion): def __init__(self, question_id: str, question: List[str], answers: Set[str], paragraph: SquadParagraph, distractors:", "as a .npy file we can re-load quickly. 
\"\"\" vec_file = join(self.dir, word_vec_name", "for q in questions: self._insert_text_to_question(q, train) def get_train(self) -> List[SquadQuestionWithDistractors]: questions = self._load(join(self.dir,", "-> RelevanceQuestion: return RelevanceQuestion(dataset_name='squad', question_id=squad_question.question_id, question_tokens=squad_question.question, supporting_facts=[squad_question.paragraph.par_text], distractors=[x.par_text for x in squad_question.distractors]) class", "Doesn't add duplicates \"\"\" for paragraph in paragraphs: if any((x.par_id == paragraph.par_id and", "pruned to the case-insensitive vocab of this corpus. WARNING: this includes dev words", "question_id: str, question: List[str], answers: Set[str], paragraph: SquadParagraph, distractors: List[SquadParagraph]): super().__init__(question_id, question, answers,", "def __init__(self, title: str, paragraphs: List[SquadParagraph]): self.title = title self.paragraphs = paragraphs self.id_to_par", "para in (question.distractors + [question.paragraph]): voc.update(x.lower() for x in para.par_text) voc_list = sorted(list(voc))", "% dir) if not exists(dir): makedirs(dir) train_document_dict = {doc.title: doc for doc in", "with open(vec_file, \"rb\") as f: return pickle.load(f) else: print(\"Building pruned word vec %s", "%s for %s from cache\" % (word_vec_name, self.name)) with open(vec_file, \"rb\") as f:", "doc for doc in dev_documents} if len(dev_document_dict) != len(dev_documents): raise ValueError(\"different dev documents", "quickly. \"\"\" vec_file = join(self.dir, word_vec_name + self.WORD_VEC_SUFFIX + \".npy\") if isfile(vec_file): print(\"Loading", "with open(file, \"rb\") as f: return pickle.load(f) def __getstate__(self): state = self.__dict__.copy() state['train_title_to_document']", "= join(CORPUS_DIR, self.NAME) if not exists(dir) or not isdir(dir): raise ValueError(\"No directory %s,", "% (word_vec_name, self.name)) with open(vec_file, \"rb\") as f: return pickle.load(f) else: print(\"Building pruned", "voc) with open(vec_file, \"wb\") as f: pickle.dump(vecs, f) return vecs def get_resource_loader(self): return", "pruned word vec %s for %s\" % (self.name, word_vec_name)) voc = self.get_vocab() vecs", "import pickle from typing import List, Set from os.path import join, exists, isfile,", "for par in [question.paragraph] + question.distractors: self._insert_text_to_paragraph(par, train) def _populate_questions(self, questions: List[SquadQuestionWithDistractors], train:", "\"rb\") as f: return pickle.load(f) def __getstate__(self): state = self.__dict__.copy() state['train_title_to_document'] = None", "\"squad_vocab.txt\" WORD_VEC_SUFFIX = \"_pruned\" @staticmethod def make_corpus(train_documents: List[SquadDocument], train: List[SquadQuestionWithDistractors], dev_documents: List[SquadDocument], dev:", "pickle.dump(vecs, f) return vecs def get_resource_loader(self): return ResourceLoader(self.get_pruned_word_vecs) def _load_document_dict(self, train: bool): if", "def get_paragraph_without_text_pickling(self): return SquadParagraph(self.doc_title, self.par_id, self.par_text, pickle_text=False) def __repr__(self) -> str: return f\"Title:", "train_document_dict), (SquadRelevanceCorpus.DEV_DOC_FILE, dev_document_dict)]: if data is not None: with open(join(dir, name), 'wb') as", "raise ValueError(\"different train documents have the same title!\") dev_document_dict = {doc.title: doc for", "def get_resource_loader(self): return ResourceLoader(self.get_pruned_word_vecs) def _load_document_dict(self, train: bool): if 
train: if self.train_title_to_document is", "already exists and is non-empty\" % dir) if not exists(dir): makedirs(dir) train_document_dict =", "+ self.WORD_VEC_SUFFIX + \".npy\") if isfile(vec_file): print(\"Loading word vec %s for %s from", "par_id: int, par_text: List[str], pickle_text=True): self.doc_title = doc_title self.par_id = par_id self.par_text =", "answers: Set[str], paragraph: SquadParagraph): self.question_id = question_id self.question = question self.answers = answers", "So that there will be no problems later def _insert_text_to_question(self, question: SquadQuestionWithDistractors, train:", "file): if not exists(file): return [] with open(file, \"rb\") as f: return pickle.load(f)", "there will be no problems later def _insert_text_to_question(self, question: SquadQuestionWithDistractors, train: bool): for", "in [(SquadRelevanceCorpus.TRAIN_FILE, train), (SquadRelevanceCorpus.DEV_FILE, dev), (SquadRelevanceCorpus.TRAIN_DOC_FILE, train_document_dict), (SquadRelevanceCorpus.DEV_DOC_FILE, dev_document_dict)]: if data is not", "= par_id self.par_text = par_text self.pickle_text = pickle_text @property def num_tokens(self): return len(self.par_text)", "!= len(dev_documents): raise ValueError(\"different dev documents have the same title!\") for name, data", "_insert_text_to_question(self, question: SquadQuestionWithDistractors, train: bool): for par in [question.paragraph] + question.distractors: self._insert_text_to_paragraph(par, train)", "pickle_text @property def num_tokens(self): return len(self.par_text) def get_paragraph_without_text_pickling(self): return SquadParagraph(self.doc_title, self.par_id, self.par_text, pickle_text=False)", "\"wb\") as f: pickle.dump(vecs, f) return vecs def get_resource_loader(self): return ResourceLoader(self.get_pruned_word_vecs) def _load_document_dict(self,", "title not matching document title!\") self.paragraphs.append(SquadParagraph(par.doc_title, par.par_id, par.par_text, pickle_text=True)) self.id_to_par[par.par_id] = self.paragraphs[-1] def", "as f: pickle.dump(vecs, f) return vecs def get_resource_loader(self): return ResourceLoader(self.get_pruned_word_vecs) def _load_document_dict(self, train:", "same title!\") dev_document_dict = {doc.title: doc for doc in dev_documents} if len(dev_document_dict) !=", "SquadParagraph: return self.id_to_par[par_id] def add_par(self, par: SquadParagraph): if par.par_id in self.id_to_par: raise ValueError(\"This", "for fn in [self.get_train, self.get_dev, self.get_test]: for question in fn(): voc.update(x.lower() for x", "we cache the pruned vecs on-disk as a .npy file we can re-load", "have the same title!\") dev_document_dict = {doc.title: doc for doc in dev_documents} if", "= \"squad_vocab.txt\" WORD_VEC_SUFFIX = \"_pruned\" @staticmethod def make_corpus(train_documents: List[SquadDocument], train: List[SquadQuestionWithDistractors], dev_documents: List[SquadDocument],", "def _build_id_paragraph_dict(self): return {x.par_id: x for x in self.paragraphs} def get_par(self, par_id) ->", "for doc in dev_documents} if len(dev_document_dict) != len(dev_documents): raise ValueError(\"different dev documents have", "= [x.get_paragraph_without_text_pickling() for x in distractors] self.distractors = distractors def add_distractors(self, paragraphs: List[SquadParagraph]):", "x.doc_title == paragraph.doc_title) for x in self.distractors): continue # self.distractors.append(paragraph.get_paragraph_without_text_pickling()) self.distractors.append(paragraph) def 
squad_question_to_relevance_question(squad_question:", "title!\") self.paragraphs.append(SquadParagraph(par.doc_title, par.par_id, par.par_text, pickle_text=True)) self.id_to_par[par.par_id] = self.paragraphs[-1] def __repr__(self) -> str: return", "yet?\" % dir) self.dir = dir self.train_title_to_document = None self.dev_title_to_document = None @property", "open(voc_file, \"w\") as f: for word in voc_list: f.write(word) f.write(\"\\n\") return voc_list def", "word vec %s for %s from cache\" % (word_vec_name, self.name)) with open(vec_file, \"rb\")", "# if isfile(dir) or (exists(dir) and len(listdir(dir))) > 0: # raise ValueError(\"Directory %s", "includes dev words This exists since loading word-vecs each time we startup can", "f\"Title: {self.title}. Number of paragraphs: {len(self.paragraphs)}\" class SquadQuestion(object): \"\"\" Squad Question and paragraphs.\"\"\"", "get_paragraph_without_text_pickling(self): return SquadParagraph(self.doc_title, self.par_id, self.par_text, pickle_text=False) def __repr__(self) -> str: return f\"Title: {self.doc_title},", "%s already exists and is non-empty\" % dir) if not exists(dir): makedirs(dir) train_document_dict", "self._insert_text_to_paragraph(par, train) def _populate_questions(self, questions: List[SquadQuestionWithDistractors], train: bool): self._load_document_dict(train) for q in questions:", "dev: List[SquadQuestionWithDistractors]): dir = join(CORPUS_DIR, SquadRelevanceCorpus.NAME) # if isfile(dir) or (exists(dir) and len(listdir(dir)))", "train_document_dict = {doc.title: doc for doc in train_documents} if len(train_document_dict) != len(train_documents): raise", "def num_tokens(self): return len(self.par_text) def get_paragraph_without_text_pickling(self): return SquadParagraph(self.doc_title, self.par_id, self.par_text, pickle_text=False) def __repr__(self)", "self.id_to_par[par.par_id] = self.paragraphs[-1] def __repr__(self) -> str: return f\"Title: {self.title}. 
Number of paragraphs:", "= join(CORPUS_DIR, SquadRelevanceCorpus.NAME) # if isfile(dir) or (exists(dir) and len(listdir(dir))) > 0: #", "self.dev_title_to_document = self._load(join(self.dir, self.DEV_DOC_FILE)) def _insert_text_to_paragraph(self, paragraph: SquadParagraph, train: bool): title_to_doc = self.train_title_to_document", "@property def evidence(self): return None def get_vocab_file(self): self.get_vocab() return join(self.dir, self.VOCAB_FILE) def get_vocab(self):", "train: List[SquadQuestionWithDistractors], dev_documents: List[SquadDocument], dev: List[SquadQuestionWithDistractors]): dir = join(CORPUS_DIR, SquadRelevanceCorpus.NAME) # if isfile(dir)", "def __repr__(self) -> str: return f\"{self.question_id}: {' '.join(self.question)}\\nAnswer(s): {self.answers}\\n\" \\ f\"Paragraph:\\n\" + '", "can be a big pain, so we cache the pruned vecs on-disk as", "{doc.title: doc for doc in train_documents} if len(train_document_dict) != len(train_documents): raise ValueError(\"different train", "for para in (question.distractors + [question.paragraph]): voc.update(x.lower() for x in para.par_text) voc_list =", "question.distractors: self._insert_text_to_paragraph(par, train) def _populate_questions(self, questions: List[SquadQuestionWithDistractors], train: bool): self._load_document_dict(train) for q in", "train) def get_train(self) -> List[SquadQuestionWithDistractors]: questions = self._load(join(self.dir, self.TRAIN_FILE)) self._populate_questions(questions, train=True) return questions", "paragraph # .get_paragraph_without_text_pickling() def __repr__(self) -> str: return f\"{self.question_id}: {' '.join(self.question)}\\nAnswer(s): {self.answers}\\n\" \\", "state = self.__dict__.copy() state['train_title_to_document'] = None state['dev_title_to_document'] = None return state def __setstate__(self,", "pickle.dump(data, f) def __init__(self): dir = join(CORPUS_DIR, self.NAME) if not exists(dir) or not", "\"\"\" voc_file = join(self.dir, self.VOCAB_FILE) if exists(voc_file): with open(voc_file, \"r\") as f: return", "{self.doc_title}, Id: {self.par_id}\\n\" \\ f\"Paragraph:\\n\" + ' '.join(self.par_text) def __getstate__(self): if not self.pickle_text:", "'.join(self.par_text) def __getstate__(self): if not self.pickle_text: state = self.__dict__.copy() state['par_text'] = None return", "squad_question_to_relevance_question(squad_question: SquadQuestionWithDistractors) -> RelevanceQuestion: return RelevanceQuestion(dataset_name='squad', question_id=squad_question.question_id, question_tokens=squad_question.question, supporting_facts=[squad_question.paragraph.par_text], distractors=[x.par_text for x in", "dir self.train_title_to_document = None self.dev_title_to_document = None @property def evidence(self): return None def", "WORD_VEC_SUFFIX = \"_pruned\" @staticmethod def make_corpus(train_documents: List[SquadDocument], train: List[SquadQuestionWithDistractors], dev_documents: List[SquadDocument], dev: List[SquadQuestionWithDistractors]):", "SquadQuestion(object): \"\"\" Squad Question and paragraphs.\"\"\" def __init__(self, question_id: str, question: List[str], answers:", "join(CORPUS_DIR, self.NAME) if not exists(dir) or not isdir(dir): raise ValueError(\"No directory %s, corpus", "= par_text self.pickle_text = pickle_text @property def num_tokens(self): return len(self.par_text) def get_paragraph_without_text_pickling(self): return", "with open(join(dir, name), 'wb') as f: pickle.dump(data, f) def __init__(self): dir = join(CORPUS_DIR,", "not None: with open(join(dir, name), 
'wb') as f: pickle.dump(data, f) def __init__(self): dir", "len(listdir(dir))) > 0: # raise ValueError(\"Directory %s already exists and is non-empty\" %", "out answer spans. When we want to predict answers, we will deal with", "for this corpus, includes train/dev/test files \"\"\" voc_file = join(self.dir, self.VOCAB_FILE) if exists(voc_file):", "f.write(word) f.write(\"\\n\") return voc_list def get_pruned_word_vecs(self, word_vec_name, voc=None): \"\"\" Loads word vectors that", "import load_word_vectors from hotpot.utils import ResourceLoader \"\"\" Squad data. For now, leaving out", "in fn(): voc.update(x.lower() for x in question.question) for para in (question.distractors + [question.paragraph]):", "for name, data in [(SquadRelevanceCorpus.TRAIN_FILE, train), (SquadRelevanceCorpus.DEV_FILE, dev), (SquadRelevanceCorpus.TRAIN_DOC_FILE, train_document_dict), (SquadRelevanceCorpus.DEV_DOC_FILE, dev_document_dict)]: if", "file we can re-load quickly. \"\"\" vec_file = join(self.dir, word_vec_name + self.WORD_VEC_SUFFIX +", "= None self.dev_title_to_document = None @property def evidence(self): return None def get_vocab_file(self): self.get_vocab()", "loading word-vecs each time we startup can be a big pain, so we", "\"r\") as f: return [x.rstrip() for x in f] else: voc = set()", "in question.question) for para in (question.distractors + [question.paragraph]): voc.update(x.lower() for x in para.par_text)", "if not exists(dir): makedirs(dir) train_document_dict = {doc.title: doc for doc in train_documents} if", "dev documents have the same title!\") for name, data in [(SquadRelevanceCorpus.TRAIN_FILE, train), (SquadRelevanceCorpus.DEV_FILE,", "dev words This exists since loading word-vecs each time we startup can be", "x for x in self.paragraphs} def get_par(self, par_id) -> SquadParagraph: return self.id_to_par[par_id] def", "paragraph.par_id and x.doc_title == paragraph.doc_title) for x in self.distractors): continue # self.distractors.append(paragraph.get_paragraph_without_text_pickling()) self.distractors.append(paragraph)", "str, paragraphs: List[SquadParagraph]): self.title = title self.paragraphs = paragraphs self.id_to_par = self._build_id_paragraph_dict() def", "DEV_DOC_FILE = \"dev_documents.pkl\" DEV_FILE = \"dev_questions.pkl\" NAME = \"squad\" VOCAB_FILE = \"squad_vocab.txt\" WORD_VEC_SUFFIX", "that there will be no problems later def _insert_text_to_question(self, question: SquadQuestionWithDistractors, train: bool):", "in f] else: voc = set() for fn in [self.get_train, self.get_dev, self.get_test]: for", "the same title!\") dev_document_dict = {doc.title: doc for doc in dev_documents} if len(dev_document_dict)", "self.train_title_to_document if train else self.dev_title_to_document paragraph.par_text = title_to_doc[paragraph.doc_title].get_par(paragraph.par_id).par_text paragraph.pickle_text = True # So", "problems later def _insert_text_to_question(self, question: SquadQuestionWithDistractors, train: bool): for par in [question.paragraph] +", "dev_document_dict)]: if data is not None: with open(join(dir, name), 'wb') as f: pickle.dump(data,", "= self._load(join(self.dir, self.DEV_FILE)) self._populate_questions(questions, train=False) return questions def get_test(self) -> List[SquadQuestionWithDistractors]: return []", "Number of paragraphs: {len(self.paragraphs)}\" class SquadQuestion(object): \"\"\" Squad Question and paragraphs.\"\"\" def __init__(self,", "def __init__(self, question_id: str, question: List[str], answers: Set[str], paragraph: SquadParagraph): 
self.question_id = question_id", "{' '.join(self.question)}\\nAnswer(s): {self.answers}\\n\" \\ f\"Paragraph:\\n\" + ' '.join(self.paragraph.par_text) class SquadQuestionWithDistractors(SquadQuestion): def __init__(self, question_id:", "exists(dir): makedirs(dir) train_document_dict = {doc.title: doc for doc in train_documents} if len(train_document_dict) !=", "train_documents} if len(train_document_dict) != len(train_documents): raise ValueError(\"different train documents have the same title!\")", "This exists since loading word-vecs each time we startup can be a big", "open(file, \"rb\") as f: return pickle.load(f) def __getstate__(self): state = self.__dict__.copy() state['train_title_to_document'] =", "load_word_vectors from hotpot.utils import ResourceLoader \"\"\" Squad data. For now, leaving out answer", "DEV_FILE = \"dev_questions.pkl\" NAME = \"squad\" VOCAB_FILE = \"squad_vocab.txt\" WORD_VEC_SUFFIX = \"_pruned\" @staticmethod", "= self.__dict__.copy() state['train_title_to_document'] = None state['dev_title_to_document'] = None return state def __setstate__(self, state):", "__init__(self, question_id: str, question: List[str], answers: Set[str], paragraph: SquadParagraph): self.question_id = question_id self.question", "if any((x.par_id == paragraph.par_id and x.doc_title == paragraph.doc_title) for x in self.distractors): continue", "= \"train_questions.pkl\" DEV_DOC_FILE = \"dev_documents.pkl\" DEV_FILE = \"dev_questions.pkl\" NAME = \"squad\" VOCAB_FILE =", "\"\"\" Loads word vectors that have been pruned to the case-insensitive vocab of", "def __getstate__(self): if not self.pickle_text: state = self.__dict__.copy() state['par_text'] = None return state", "add_par(self, par: SquadParagraph): if par.par_id in self.id_to_par: raise ValueError(\"This paragraph id already exists", "from cache\" % (word_vec_name, self.name)) with open(vec_file, \"rb\") as f: return pickle.load(f) else:", "if len(dev_document_dict) != len(dev_documents): raise ValueError(\"different dev documents have the same title!\") for", "Set[str], paragraph: SquadParagraph): self.question_id = question_id self.question = question self.answers = answers self.paragraph", "get_vocab(self): \"\"\" get all-lower cased unique words for this corpus, includes train/dev/test files", "def add_distractors(self, paragraphs: List[SquadParagraph]): \"\"\" Doesn't add duplicates \"\"\" for paragraph in paragraphs:", "if self.train_title_to_document is None: self.train_title_to_document = self._load(join(self.dir, self.TRAIN_DOC_FILE)) else: if self.dev_title_to_document is None:", "= None state['dev_title_to_document'] = None return state def __setstate__(self, state): self.__dict__ = state", "of paragraphs: {len(self.paragraphs)}\" class SquadQuestion(object): \"\"\" Squad Question and paragraphs.\"\"\" def __init__(self, question_id:", "self.distractors = [x.get_paragraph_without_text_pickling() for x in distractors] self.distractors = distractors def add_distractors(self, paragraphs:", "par.par_id in self.id_to_par: raise ValueError(\"This paragraph id already exists in this document!\") if", "not matching document title!\") self.paragraphs.append(SquadParagraph(par.doc_title, par.par_id, par.par_text, pickle_text=True)) self.id_to_par[par.par_id] = self.paragraphs[-1] def __repr__(self)", "def _insert_text_to_question(self, question: SquadQuestionWithDistractors, train: bool): for par in [question.paragraph] + question.distractors: self._insert_text_to_paragraph(par,", "{self.par_id}\\n\" \\ f\"Paragraph:\\n\" + ' 
'.join(self.par_text) def __getstate__(self): if not self.pickle_text: state =", "@staticmethod def make_corpus(train_documents: List[SquadDocument], train: List[SquadQuestionWithDistractors], dev_documents: List[SquadDocument], dev: List[SquadQuestionWithDistractors]): dir = join(CORPUS_DIR," ]
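# --- Illustrative sketch (not in the original file): how the pickle_text flag
# above controls serialization. A paragraph obtained via
# get_paragraph_without_text_pickling() drops par_text when pickled, which is
# why SquadRelevanceCorpus restores the text from the document pickles via
# _insert_text_to_paragraph after loading questions. Names below are made up.
demo_par = SquadParagraph("Example Title", 0, ["a", "tokenized", "paragraph"])
light_par = demo_par.get_paragraph_without_text_pickling()  # pickle_text=False

restored = pickle.loads(pickle.dumps(light_par))
assert restored.par_text is None                    # stripped by __getstate__
restored_full = pickle.loads(pickle.dumps(demo_par))
assert restored_full.par_text == demo_par.par_text  # kept with pickle_text=True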
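# --- Hedged usage sketch for the corpus API above (not in the original file).
# It assumes CORPUS_DIR points at a writable directory; titles, ids, and
# tokens are placeholders. make_corpus writes the four pickles once;
# get_train() then loads questions and re-inserts paragraph text.
doc = SquadDocument("Example Title", [SquadParagraph("Example Title", 0, ["some", "tokens"])])
question = SquadQuestionWithDistractors(
    question_id="q0",
    question=["which", "tokens", "?"],
    answers={"some"},
    paragraph=doc.paragraphs[0],
    distractors=[],
)
SquadRelevanceCorpus.make_corpus([doc], [question], dev_documents=[doc], dev=[question])

corpus = SquadRelevanceCorpus()
train_questions = corpus.get_train()  # paragraph text populated from documents
relevance_q = squad_question_to_relevance_question(train_questions[0])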
<reponame>Fricodelco/image_matching
#!/usr/bin/env python3
from cv2 import resize
from numpy import reshape
import rospy
import cv2
import numpy as np
from PIL import Image
import rospkg
import gdal
from dataclasses import dataclass
from geodetic_conv import GeodeticConvert
from decimal import Decimal
import os
from time import time


@dataclass
class img_point:
    pixel_y: int = 0
    pixel_x: int = 0
    lat: float = 0.0
    lon: float = 0.0


class image_processing():
    def __init__(self, filename=None, img=None):
        self.cuda = self.is_cuda_cv()
        self.main_points = []
        self.g_c = GeodeticConvert()
        self.img = None
        self.pixel_size = 0
        self.kp = None
        self.dp = None
        self.cadr_scale = 0.0
        time_start = time()
        if filename is not None:
            home = os.getenv("HOME")
            data_path = home + '/copa5/map'
            file_exists = os.path.exists(data_path + '/' + filename + '.tif')
            # print("start job")
            try:
                if file_exists is True:
                    # raster = gdal.Open(data_path+'/'+filename+'.tif')
                    self.img = cv2.imread(data_path + '/' + filename + '.tif')
                else:
                    # raster = gdal.Open(data_path+'/'+filename+'.TIF')
                    self.img = cv2.imread(data_path + '/' + filename + '.TIF')
            except:
                # print("NO MAP FILE")
                return None
            # print("map loaded", time() - time_start)
            time_start = time()
            # self.img = raster.ReadAsArray()
            # self.img = np.dstack((self.img[0], self.img[1], self.img[2]))
            # self.img = self.img[0]
            self.img = cv2.cvtColor(self.img, cv2.COLOR_RGB2GRAY)
            # print("to gray complete", time() - time_start)
            time_start = time()
            # Parse the georeference sidecar file: each data line holds
            # pixel_y, pixel_x, lat, lon for one reference point.
            with open(data_path + '/' + filename + '.@@@') as f:
                lines = f.readlines()
                for i in range(2, len(lines)):
                    sub_str = lines[i].split(' ')
                    sub_str = [j for j in sub_str if j]
                    try:
                        sub_str.remove('\n')
                    except:
                        e = 0  # no trailing newline token to strip
                    sub_str = [float(k) for k in sub_str]
                    point = img_point(sub_str[0], sub_str[1], sub_str[2], sub_str[3])
                    self.main_points.append(point)
        else:
            self.img = img
            # self.img = self.img[:,:,2]

    def find_pixel_size(self):
        # NED distances between reference corner points, divided by the image
        # dimensions and averaged, give the ground size of one pixel.
        self.g_c.initialiseReference(self.main_points[0].lat, self.main_points[0].lon, 0)
        x_1, y_1, z_1 = self.g_c.geodetic2Ned(self.main_points[1].lat, self.main_points[1].lon, 0)
        x_2, y_2, z_2 = self.g_c.geodetic2Ned(self.main_points[3].lat, self.main_points[3].lon, 0)
        if abs(x_1) > abs(x_2):
            x = x_1
        else:
            x = x_2
        if abs(y_1) > abs(y_2):
            y = y_1
        else:
            y = y_2
        pixel_size_1 = Decimal((abs(x))) / Decimal(self.img.shape[0])
        pixel_size_2 = Decimal((abs(y))) / Decimal(self.img.shape[1])
        pixel_size = (Decimal(pixel_size_1) + Decimal(pixel_size_2)) / Decimal(2)
        self.pixel_size = pixel_size
        return pixel_size

    def find_pixel_size_by_height(self, height, poi):
        x = Decimal(np.tanh(poi / 2) * 2 * height)
        self.pixel_size = x / Decimal(self.img.shape[1])

    def is_cuda_cv(self):
        try:
            count = cv2.cuda.getCudaEnabledDeviceCount()
            if count > 0:
                # print("CUDA IS ENABLED")
                return True
            else:
                # print("CUDA IS DISABLED")
                return False
        except:
            # print("CUDA IS DISABLED")
            return False

    # NOTE: the enclosing signature for the two statements below could not be
    # recovered from the garbled source; the method name here is an assumption.
    def rescale_and_find_keypoints(self, match_finder):
        self.cadr_scale, self.pixel_size = match_finder.rescale_cadr(self.img, self.pixel_size)
        self.kp, self.dp, self.img = match_finder.find_kp_dp(self.img)

# def main():
#     map_ = image_processing(filename = '26_12_2021_nn')
#     map_.find_pixel_size()
y_1,", "filename is not None: home = os.getenv(\"HOME\") data_path = home+'/copa5/map' file_exists = os.path.exists(data_path+'/'+filename+'.tif')", "# self.img = self.img[:,:,2] def find_pixel_size(self): self.g_c.initialiseReference(self.main_points[0].lat, self.main_points[0].lon, 0) x_1, y_1, z_1 =", "= 0 self.kp = None self.dp = None self.cadr_scale = 0.0 time_start =", "None, img = None): self.cuda = self.is_cuda_cv() self.main_points = [] self.g_c = GeodeticConvert()", "self.main_points[1].lon, 0) x_2, y_2, z_2 = self.g_c.geodetic2Ned(self.main_points[3].lat, self.main_points[3].lon, 0) if abs(x_1) > abs(x_2):", "self.img, self.cadr_scale, self.pixel_size = match_finder.rescale_cadr(self.img, self.pixel_size) self.kp, self.dp, self.img = match_finder.find_kp_dp(self.img) # def", "> 0: # print(\"CUDA IS ENABLED\") return True else: # print(\"CUDA IS DISABLED\")", "as f: lines = f.readlines() for i in range(2, len(lines)): sub_str = lines[i].split('", "0) x_1, y_1, z_1 = self.g_c.geodetic2Ned(self.main_points[1].lat, self.main_points[1].lon, 0) x_2, y_2, z_2 = self.g_c.geodetic2Ned(self.main_points[3].lat,", "import numpy as np from PIL import Image import rospkg import gdal from", "= 0 lat: float = 0.0 lon: float = 0.0 class image_processing(): def", "- time_start) time_start = time() with open(data_path+'/'+filename+'.@@@') as f: lines = f.readlines() for", "= np.dstack((self.img[0],self.img[1],self.img[2])) # self.img = self.img[0] self.img = cv2.cvtColor(self.img, cv2.COLOR_RGB2GRAY) # print(\"to gray", "= os.path.exists(data_path+'/'+filename+'.tif') # print(\"start job\") try: if file_exists is True: # raster =", "for k in sub_str] point = img_point(sub_str[0], sub_str[1], sub_str[2], sub_str[3]) self.main_points.append(point) else: self.img", "match_finder): self.img, self.cadr_scale, self.pixel_size = match_finder.rescale_cadr(self.img, self.pixel_size) self.kp, self.dp, self.img = match_finder.find_kp_dp(self.img) #", "if file_exists is True: # raster = gdal.Open(data_path+'/'+filename+'.tif') self.img = cv2.imread(data_path+'/'+filename+'.tif') else: #", "time import time @dataclass class img_point: pixel_y: int = 0 pixel_x: int =", "IS ENABLED\") return True else: # print(\"CUDA IS DISABLED\") return False except: #", "False def find_kp_dp_scale(self, match_finder): self.img, self.cadr_scale, self.pixel_size = match_finder.rescale_cadr(self.img, self.pixel_size) self.kp, self.dp, self.img", "IS DISABLED\") return False except: # print(\"CUDA IS DISABLED\") return False def find_kp_dp_scale(self,", "img # self.img = self.img[:,:,2] def find_pixel_size(self): self.g_c.initialiseReference(self.main_points[0].lat, self.main_points[0].lon, 0) x_1, y_1, z_1", "# print(\"CUDA IS DISABLED\") return False def find_kp_dp_scale(self, match_finder): self.img, self.cadr_scale, self.pixel_size =", "f: lines = f.readlines() for i in range(2, len(lines)): sub_str = lines[i].split(' ')", "pixel_y: int = 0 pixel_x: int = 0 lat: float = 0.0 lon:", "self.cadr_scale = 0.0 time_start = time() if filename is not None: home =", "not None: home = os.getenv(\"HOME\") data_path = home+'/copa5/map' file_exists = os.path.exists(data_path+'/'+filename+'.tif') # print(\"start", "0 lat: float = 0.0 lon: float = 0.0 class image_processing(): def __init__(self,", "# self.img = self.img[0] self.img = cv2.cvtColor(self.img, cv2.COLOR_RGB2GRAY) # print(\"to gray complete\", time()", "for i in range(2, len(lines)): sub_str = lines[i].split(' ') sub_str = [j for", "k in sub_str] point = img_point(sub_str[0], 
sub_str[1], sub_str[2], sub_str[3]) self.main_points.append(point) else: self.img =", "x_1, y_1, z_1 = self.g_c.geodetic2Ned(self.main_points[1].lat, self.main_points[1].lon, 0) x_2, y_2, z_2 = self.g_c.geodetic2Ned(self.main_points[3].lat, self.main_points[3].lon,", "else: y = y_2 pixel_size_1 = Decimal((abs(x)))/Decimal(self.img.shape[0]) pixel_size_2 = Decimal((abs(y)))/Decimal(self.img.shape[1]) pixel_size = (Decimal(pixel_size_1)", "find_pixel_size_by_height(self, height, poi): x = Decimal(np.tanh(poi/2)*2*height) self.pixel_size = x/Decimal(self.img.shape[1]) def is_cuda_cv(self): try: count", "pixel_size = (Decimal(pixel_size_1) + Decimal(pixel_size_2))/Decimal(2) self.pixel_size = pixel_size return pixel_size def find_pixel_size_by_height(self, height,", "0.0 class image_processing(): def __init__(self, filename = None, img = None): self.cuda =", "True: # raster = gdal.Open(data_path+'/'+filename+'.tif') self.img = cv2.imread(data_path+'/'+filename+'.tif') else: # raster = gdal.Open(data_path+'/'+filename+'.TIF')", "[] self.g_c = GeodeticConvert() self.img = None self.pixel_size = 0 self.kp = None", "def find_pixel_size_by_height(self, height, poi): x = Decimal(np.tanh(poi/2)*2*height) self.pixel_size = x/Decimal(self.img.shape[1]) def is_cuda_cv(self): try:", "= time() if filename is not None: home = os.getenv(\"HOME\") data_path = home+'/copa5/map'", "= pixel_size return pixel_size def find_pixel_size_by_height(self, height, poi): x = Decimal(np.tanh(poi/2)*2*height) self.pixel_size =", "= 0 pixel_x: int = 0 lat: float = 0.0 lon: float =", "numpy import reshape import rospy import cv2 import numpy as np from PIL", "sub_str] point = img_point(sub_str[0], sub_str[1], sub_str[2], sub_str[3]) self.main_points.append(point) else: self.img = img #", "from time import time @dataclass class img_point: pixel_y: int = 0 pixel_x: int", "home+'/copa5/map' file_exists = os.path.exists(data_path+'/'+filename+'.tif') # print(\"start job\") try: if file_exists is True: #", "in sub_str] point = img_point(sub_str[0], sub_str[1], sub_str[2], sub_str[3]) self.main_points.append(point) else: self.img = img", "open(data_path+'/'+filename+'.@@@') as f: lines = f.readlines() for i in range(2, len(lines)): sub_str =", "from PIL import Image import rospkg import gdal from dataclasses import dataclass from", "print(\"start job\") try: if file_exists is True: # raster = gdal.Open(data_path+'/'+filename+'.tif') self.img =", "self.img = cv2.cvtColor(self.img, cv2.COLOR_RGB2GRAY) # print(\"to gray complete\", time() - time_start) time_start =", "float = 0.0 class image_processing(): def __init__(self, filename = None, img = None):", "x_1 else: x = x_2 if abs(y_1) > abs(y_2): y = y_1 else:", "int = 0 pixel_x: int = 0 lat: float = 0.0 lon: float", "raster.ReadAsArray() # self.img = np.dstack((self.img[0],self.img[1],self.img[2])) # self.img = self.img[0] self.img = cv2.cvtColor(self.img, cv2.COLOR_RGB2GRAY)", "= gdal.Open(data_path+'/'+filename+'.tif') self.img = cv2.imread(data_path+'/'+filename+'.tif') else: # raster = gdal.Open(data_path+'/'+filename+'.TIF') self.img = cv2.imread(data_path+'/'+filename+'.TIF')", "self.dp = None self.cadr_scale = 0.0 time_start = time() if filename is not", "DISABLED\") return False except: # print(\"CUDA IS DISABLED\") return False def find_kp_dp_scale(self, match_finder):", "in range(2, len(lines)): sub_str = lines[i].split(' ') sub_str = [j for j in", "True else: # print(\"CUDA IS DISABLED\") return False except: # print(\"CUDA IS DISABLED\")", "= x_2 if abs(y_1) > 
abs(y_2): y = y_1 else: y = y_2", "os.getenv(\"HOME\") data_path = home+'/copa5/map' file_exists = os.path.exists(data_path+'/'+filename+'.tif') # print(\"start job\") try: if file_exists", "gray complete\", time() - time_start) time_start = time() with open(data_path+'/'+filename+'.@@@') as f: lines", "= self.img[0] self.img = cv2.cvtColor(self.img, cv2.COLOR_RGB2GRAY) # print(\"to gray complete\", time() - time_start)", "import Image import rospkg import gdal from dataclasses import dataclass from geodetic_conv import", "= cv2.imread(data_path+'/'+filename+'.TIF') except: # print(\"NO MAP FILE\") return None # print(\"map loaded\", time()", "self.g_c.initialiseReference(self.main_points[0].lat, self.main_points[0].lon, 0) x_1, y_1, z_1 = self.g_c.geodetic2Ned(self.main_points[1].lat, self.main_points[1].lon, 0) x_2, y_2, z_2", "e = 0 sub_str = [float(k) for k in sub_str] point = img_point(sub_str[0],", "DISABLED\") return False def find_kp_dp_scale(self, match_finder): self.img, self.cadr_scale, self.pixel_size = match_finder.rescale_cadr(self.img, self.pixel_size) self.kp,", "time() - time_start) time_start = time() with open(data_path+'/'+filename+'.@@@') as f: lines = f.readlines()", "file_exists = os.path.exists(data_path+'/'+filename+'.tif') # print(\"start job\") try: if file_exists is True: # raster", "= image_processing(filename = '26_12_2021_nn') # map_.find_pixel_size() # if __name__ == '__main__': # main()", "lat: float = 0.0 lon: float = 0.0 class image_processing(): def __init__(self, filename", "self.main_points[3].lon, 0) if abs(x_1) > abs(x_2): x = x_1 else: x = x_2", "float = 0.0 lon: float = 0.0 class image_processing(): def __init__(self, filename =", "main(): # map_ = image_processing(filename = '26_12_2021_nn') # map_.find_pixel_size() # if __name__ ==", "from cv2 import resize from numpy import reshape import rospy import cv2 import", "class image_processing(): def __init__(self, filename = None, img = None): self.cuda = self.is_cuda_cv()", "img_point(sub_str[0], sub_str[1], sub_str[2], sub_str[3]) self.main_points.append(point) else: self.img = img # self.img = self.img[:,:,2]", "def main(): # map_ = image_processing(filename = '26_12_2021_nn') # map_.find_pixel_size() # if __name__", "else: # print(\"CUDA IS DISABLED\") return False except: # print(\"CUDA IS DISABLED\") return", "None self.pixel_size = 0 self.kp = None self.dp = None self.cadr_scale = 0.0", "@dataclass class img_point: pixel_y: int = 0 pixel_x: int = 0 lat: float", "return pixel_size def find_pixel_size_by_height(self, height, poi): x = Decimal(np.tanh(poi/2)*2*height) self.pixel_size = x/Decimal(self.img.shape[1]) def", "None self.cadr_scale = 0.0 time_start = time() if filename is not None: home", "decimal import Decimal import os from time import time @dataclass class img_point: pixel_y:", "self.pixel_size = 0 self.kp = None self.dp = None self.cadr_scale = 0.0 time_start", "sub_str[2], sub_str[3]) self.main_points.append(point) else: self.img = img # self.img = self.img[:,:,2] def find_pixel_size(self):", "# map_ = image_processing(filename = '26_12_2021_nn') # map_.find_pixel_size() # if __name__ == '__main__':", "= os.getenv(\"HOME\") data_path = home+'/copa5/map' file_exists = os.path.exists(data_path+'/'+filename+'.tif') # print(\"start job\") try: if", "cv2.cuda.getCudaEnabledDeviceCount() if count > 0: # print(\"CUDA IS ENABLED\") return True else: #", "import gdal from dataclasses import dataclass from geodetic_conv import GeodeticConvert from decimal import", "sub_str.remove('\\n') 
except: e = 0 sub_str = [float(k) for k in sub_str] point", "= match_finder.rescale_cadr(self.img, self.pixel_size) self.kp, self.dp, self.img = match_finder.find_kp_dp(self.img) # def main(): # map_", "import reshape import rospy import cv2 import numpy as np from PIL import", "PIL import Image import rospkg import gdal from dataclasses import dataclass from geodetic_conv", "= None self.cadr_scale = 0.0 time_start = time() if filename is not None:", "match_finder.rescale_cadr(self.img, self.pixel_size) self.kp, self.dp, self.img = match_finder.find_kp_dp(self.img) # def main(): # map_ =", "int = 0 lat: float = 0.0 lon: float = 0.0 class image_processing():", "IS DISABLED\") return False def find_kp_dp_scale(self, match_finder): self.img, self.cadr_scale, self.pixel_size = match_finder.rescale_cadr(self.img, self.pixel_size)", "is True: # raster = gdal.Open(data_path+'/'+filename+'.tif') self.img = cv2.imread(data_path+'/'+filename+'.tif') else: # raster =", "time() if filename is not None: home = os.getenv(\"HOME\") data_path = home+'/copa5/map' file_exists", "0.0 time_start = time() if filename is not None: home = os.getenv(\"HOME\") data_path", "img = None): self.cuda = self.is_cuda_cv() self.main_points = [] self.g_c = GeodeticConvert() self.img", "self.main_points.append(point) else: self.img = img # self.img = self.img[:,:,2] def find_pixel_size(self): self.g_c.initialiseReference(self.main_points[0].lat, self.main_points[0].lon,", "resize from numpy import reshape import rospy import cv2 import numpy as np", "pixel_size_2 = Decimal((abs(y)))/Decimal(self.img.shape[1]) pixel_size = (Decimal(pixel_size_1) + Decimal(pixel_size_2))/Decimal(2) self.pixel_size = pixel_size return pixel_size", "= (Decimal(pixel_size_1) + Decimal(pixel_size_2))/Decimal(2) self.pixel_size = pixel_size return pixel_size def find_pixel_size_by_height(self, height, poi):", "sub_str[1], sub_str[2], sub_str[3]) self.main_points.append(point) else: self.img = img # self.img = self.img[:,:,2] def", "= None self.pixel_size = 0 self.kp = None self.dp = None self.cadr_scale =", "poi): x = Decimal(np.tanh(poi/2)*2*height) self.pixel_size = x/Decimal(self.img.shape[1]) def is_cuda_cv(self): try: count = cv2.cuda.getCudaEnabledDeviceCount()", "y = y_1 else: y = y_2 pixel_size_1 = Decimal((abs(x)))/Decimal(self.img.shape[0]) pixel_size_2 = Decimal((abs(y)))/Decimal(self.img.shape[1])", "def find_kp_dp_scale(self, match_finder): self.img, self.cadr_scale, self.pixel_size = match_finder.rescale_cadr(self.img, self.pixel_size) self.kp, self.dp, self.img =", "def __init__(self, filename = None, img = None): self.cuda = self.is_cuda_cv() self.main_points =", "try: count = cv2.cuda.getCudaEnabledDeviceCount() if count > 0: # print(\"CUDA IS ENABLED\") return", "Decimal(np.tanh(poi/2)*2*height) self.pixel_size = x/Decimal(self.img.shape[1]) def is_cuda_cv(self): try: count = cv2.cuda.getCudaEnabledDeviceCount() if count >", "0) x_2, y_2, z_2 = self.g_c.geodetic2Ned(self.main_points[3].lat, self.main_points[3].lon, 0) if abs(x_1) > abs(x_2): x", "(Decimal(pixel_size_1) + Decimal(pixel_size_2))/Decimal(2) self.pixel_size = pixel_size return pixel_size def find_pixel_size_by_height(self, height, poi): x", "loaded\", time() - time_start) time_start = time() # self.img = raster.ReadAsArray() # self.img", "self.img = img # self.img = self.img[:,:,2] def find_pixel_size(self): self.g_c.initialiseReference(self.main_points[0].lat, self.main_points[0].lon, 0) x_1,", "= self.g_c.geodetic2Ned(self.main_points[1].lat, 
self.main_points[1].lon, 0) x_2, y_2, z_2 = self.g_c.geodetic2Ned(self.main_points[3].lat, self.main_points[3].lon, 0) if abs(x_1)", "in sub_str if j] try: sub_str.remove('\\n') except: e = 0 sub_str = [float(k)", "if abs(y_1) > abs(y_2): y = y_1 else: y = y_2 pixel_size_1 =", "if abs(x_1) > abs(x_2): x = x_1 else: x = x_2 if abs(y_1)", "except: # print(\"NO MAP FILE\") return None # print(\"map loaded\", time() - time_start)", "def find_pixel_size(self): self.g_c.initialiseReference(self.main_points[0].lat, self.main_points[0].lon, 0) x_1, y_1, z_1 = self.g_c.geodetic2Ned(self.main_points[1].lat, self.main_points[1].lon, 0) x_2,", "j] try: sub_str.remove('\\n') except: e = 0 sub_str = [float(k) for k in", "gdal from dataclasses import dataclass from geodetic_conv import GeodeticConvert from decimal import Decimal", "self.img = None self.pixel_size = 0 self.kp = None self.dp = None self.cadr_scale", "= time() with open(data_path+'/'+filename+'.@@@') as f: lines = f.readlines() for i in range(2,", "Image import rospkg import gdal from dataclasses import dataclass from geodetic_conv import GeodeticConvert", "import rospkg import gdal from dataclasses import dataclass from geodetic_conv import GeodeticConvert from", "time @dataclass class img_point: pixel_y: int = 0 pixel_x: int = 0 lat:", "time_start = time() # self.img = raster.ReadAsArray() # self.img = np.dstack((self.img[0],self.img[1],self.img[2])) # self.img", "self.pixel_size = match_finder.rescale_cadr(self.img, self.pixel_size) self.kp, self.dp, self.img = match_finder.find_kp_dp(self.img) # def main(): #", "gdal.Open(data_path+'/'+filename+'.TIF') self.img = cv2.imread(data_path+'/'+filename+'.TIF') except: # print(\"NO MAP FILE\") return None # print(\"map", "Decimal((abs(y)))/Decimal(self.img.shape[1]) pixel_size = (Decimal(pixel_size_1) + Decimal(pixel_size_2))/Decimal(2) self.pixel_size = pixel_size return pixel_size def find_pixel_size_by_height(self,", "if j] try: sub_str.remove('\\n') except: e = 0 sub_str = [float(k) for k", "sub_str = [j for j in sub_str if j] try: sub_str.remove('\\n') except: e", "is_cuda_cv(self): try: count = cv2.cuda.getCudaEnabledDeviceCount() if count > 0: # print(\"CUDA IS ENABLED\")", "= time() # self.img = raster.ReadAsArray() # self.img = np.dstack((self.img[0],self.img[1],self.img[2])) # self.img =", "self.img = cv2.imread(data_path+'/'+filename+'.tif') else: # raster = gdal.Open(data_path+'/'+filename+'.TIF') self.img = cv2.imread(data_path+'/'+filename+'.TIF') except: #", "# def main(): # map_ = image_processing(filename = '26_12_2021_nn') # map_.find_pixel_size() # if", "self.main_points = [] self.g_c = GeodeticConvert() self.img = None self.pixel_size = 0 self.kp", "y = y_2 pixel_size_1 = Decimal((abs(x)))/Decimal(self.img.shape[0]) pixel_size_2 = Decimal((abs(y)))/Decimal(self.img.shape[1]) pixel_size = (Decimal(pixel_size_1) +", "self.g_c.geodetic2Ned(self.main_points[3].lat, self.main_points[3].lon, 0) if abs(x_1) > abs(x_2): x = x_1 else: x =", "return False except: # print(\"CUDA IS DISABLED\") return False def find_kp_dp_scale(self, match_finder): self.img,", "0 self.kp = None self.dp = None self.cadr_scale = 0.0 time_start = time()", "print(\"CUDA IS DISABLED\") return False def find_kp_dp_scale(self, match_finder): self.img, self.cadr_scale, self.pixel_size = match_finder.rescale_cadr(self.img,", "= raster.ReadAsArray() # self.img = np.dstack((self.img[0],self.img[1],self.img[2])) # self.img = self.img[0] self.img = cv2.cvtColor(self.img,", "except: # print(\"CUDA IS DISABLED\") 
return False def find_kp_dp_scale(self, match_finder): self.img, self.cadr_scale, self.pixel_size", "import resize from numpy import reshape import rospy import cv2 import numpy as", "= f.readlines() for i in range(2, len(lines)): sub_str = lines[i].split(' ') sub_str =", "import time @dataclass class img_point: pixel_y: int = 0 pixel_x: int = 0", "img_point: pixel_y: int = 0 pixel_x: int = 0 lat: float = 0.0", "rospy import cv2 import numpy as np from PIL import Image import rospkg", "print(\"CUDA IS DISABLED\") return False except: # print(\"CUDA IS DISABLED\") return False def", "self.g_c.geodetic2Ned(self.main_points[1].lat, self.main_points[1].lon, 0) x_2, y_2, z_2 = self.g_c.geodetic2Ned(self.main_points[3].lat, self.main_points[3].lon, 0) if abs(x_1) >", "file_exists is True: # raster = gdal.Open(data_path+'/'+filename+'.tif') self.img = cv2.imread(data_path+'/'+filename+'.tif') else: # raster", "cv2.imread(data_path+'/'+filename+'.TIF') except: # print(\"NO MAP FILE\") return None # print(\"map loaded\", time() -", "self.img[0] self.img = cv2.cvtColor(self.img, cv2.COLOR_RGB2GRAY) # print(\"to gray complete\", time() - time_start) time_start", "= 0 sub_str = [float(k) for k in sub_str] point = img_point(sub_str[0], sub_str[1],", "# print(\"NO MAP FILE\") return None # print(\"map loaded\", time() - time_start) time_start", "= y_2 pixel_size_1 = Decimal((abs(x)))/Decimal(self.img.shape[0]) pixel_size_2 = Decimal((abs(y)))/Decimal(self.img.shape[1]) pixel_size = (Decimal(pixel_size_1) + Decimal(pixel_size_2))/Decimal(2)", "') sub_str = [j for j in sub_str if j] try: sub_str.remove('\\n') except:", "sub_str if j] try: sub_str.remove('\\n') except: e = 0 sub_str = [float(k) for", "time() - time_start) time_start = time() # self.img = raster.ReadAsArray() # self.img =", "as np from PIL import Image import rospkg import gdal from dataclasses import", "= y_1 else: y = y_2 pixel_size_1 = Decimal((abs(x)))/Decimal(self.img.shape[0]) pixel_size_2 = Decimal((abs(y)))/Decimal(self.img.shape[1]) pixel_size", "False except: # print(\"CUDA IS DISABLED\") return False def find_kp_dp_scale(self, match_finder): self.img, self.cadr_scale,", "find_pixel_size(self): self.g_c.initialiseReference(self.main_points[0].lat, self.main_points[0].lon, 0) x_1, y_1, z_1 = self.g_c.geodetic2Ned(self.main_points[1].lat, self.main_points[1].lon, 0) x_2, y_2,", "__init__(self, filename = None, img = None): self.cuda = self.is_cuda_cv() self.main_points = []", "= 0.0 lon: float = 0.0 class image_processing(): def __init__(self, filename = None,", "time() # self.img = raster.ReadAsArray() # self.img = np.dstack((self.img[0],self.img[1],self.img[2])) # self.img = self.img[0]", "image_processing(): def __init__(self, filename = None, img = None): self.cuda = self.is_cuda_cv() self.main_points", "= self.g_c.geodetic2Ned(self.main_points[3].lat, self.main_points[3].lon, 0) if abs(x_1) > abs(x_2): x = x_1 else: x", "x = Decimal(np.tanh(poi/2)*2*height) self.pixel_size = x/Decimal(self.img.shape[1]) def is_cuda_cv(self): try: count = cv2.cuda.getCudaEnabledDeviceCount() if", "np from PIL import Image import rospkg import gdal from dataclasses import dataclass", "= img_point(sub_str[0], sub_str[1], sub_str[2], sub_str[3]) self.main_points.append(point) else: self.img = img # self.img =", "class img_point: pixel_y: int = 0 pixel_x: int = 0 lat: float =", "x/Decimal(self.img.shape[1]) def is_cuda_cv(self): try: count = cv2.cuda.getCudaEnabledDeviceCount() if count > 0: # print(\"CUDA", "y_1, z_1 = 
self.g_c.geodetic2Ned(self.main_points[1].lat, self.main_points[1].lon, 0) x_2, y_2, z_2 = self.g_c.geodetic2Ned(self.main_points[3].lat, self.main_points[3].lon, 0)", "else: self.img = img # self.img = self.img[:,:,2] def find_pixel_size(self): self.g_c.initialiseReference(self.main_points[0].lat, self.main_points[0].lon, 0)", "for j in sub_str if j] try: sub_str.remove('\\n') except: e = 0 sub_str", "= [float(k) for k in sub_str] point = img_point(sub_str[0], sub_str[1], sub_str[2], sub_str[3]) self.main_points.append(point)", "z_1 = self.g_c.geodetic2Ned(self.main_points[1].lat, self.main_points[1].lon, 0) x_2, y_2, z_2 = self.g_c.geodetic2Ned(self.main_points[3].lat, self.main_points[3].lon, 0) if", "pixel_size return pixel_size def find_pixel_size_by_height(self, height, poi): x = Decimal(np.tanh(poi/2)*2*height) self.pixel_size = x/Decimal(self.img.shape[1])", "dataclass from geodetic_conv import GeodeticConvert from decimal import Decimal import os from time", "self.img = cv2.imread(data_path+'/'+filename+'.TIF') except: # print(\"NO MAP FILE\") return None # print(\"map loaded\",", "FILE\") return None # print(\"map loaded\", time() - time_start) time_start = time() #", "return False def find_kp_dp_scale(self, match_finder): self.img, self.cadr_scale, self.pixel_size = match_finder.rescale_cadr(self.img, self.pixel_size) self.kp, self.dp,", "map_ = image_processing(filename = '26_12_2021_nn') # map_.find_pixel_size() # if __name__ == '__main__': #", "np.dstack((self.img[0],self.img[1],self.img[2])) # self.img = self.img[0] self.img = cv2.cvtColor(self.img, cv2.COLOR_RGB2GRAY) # print(\"to gray complete\",", "self.kp = None self.dp = None self.cadr_scale = 0.0 time_start = time() if", "return True else: # print(\"CUDA IS DISABLED\") return False except: # print(\"CUDA IS", "= self.img[:,:,2] def find_pixel_size(self): self.g_c.initialiseReference(self.main_points[0].lat, self.main_points[0].lon, 0) x_1, y_1, z_1 = self.g_c.geodetic2Ned(self.main_points[1].lat, self.main_points[1].lon,", "return None # print(\"map loaded\", time() - time_start) time_start = time() # self.img", "self.img[:,:,2] def find_pixel_size(self): self.g_c.initialiseReference(self.main_points[0].lat, self.main_points[0].lon, 0) x_1, y_1, z_1 = self.g_c.geodetic2Ned(self.main_points[1].lat, self.main_points[1].lon, 0)", "import GeodeticConvert from decimal import Decimal import os from time import time @dataclass", "#!/usr/bin/env python3 from cv2 import resize from numpy import reshape import rospy import", "def is_cuda_cv(self): try: count = cv2.cuda.getCudaEnabledDeviceCount() if count > 0: # print(\"CUDA IS", "try: if file_exists is True: # raster = gdal.Open(data_path+'/'+filename+'.tif') self.img = cv2.imread(data_path+'/'+filename+'.tif') else:", "abs(x_1) > abs(x_2): x = x_1 else: x = x_2 if abs(y_1) >", "[float(k) for k in sub_str] point = img_point(sub_str[0], sub_str[1], sub_str[2], sub_str[3]) self.main_points.append(point) else:", "from dataclasses import dataclass from geodetic_conv import GeodeticConvert from decimal import Decimal import", "y_2 pixel_size_1 = Decimal((abs(x)))/Decimal(self.img.shape[0]) pixel_size_2 = Decimal((abs(y)))/Decimal(self.img.shape[1]) pixel_size = (Decimal(pixel_size_1) + Decimal(pixel_size_2))/Decimal(2) self.pixel_size", "= x/Decimal(self.img.shape[1]) def is_cuda_cv(self): try: count = cv2.cuda.getCudaEnabledDeviceCount() if count > 0: #", "MAP FILE\") return None # print(\"map loaded\", time() - time_start) time_start = time()", "print(\"map loaded\", time() - 
time_start) time_start = time() # self.img = raster.ReadAsArray() #", "# self.img = raster.ReadAsArray() # self.img = np.dstack((self.img[0],self.img[1],self.img[2])) # self.img = self.img[0] self.img", "find_kp_dp_scale(self, match_finder): self.img, self.cadr_scale, self.pixel_size = match_finder.rescale_cadr(self.img, self.pixel_size) self.kp, self.dp, self.img = match_finder.find_kp_dp(self.img)", "pixel_x: int = 0 lat: float = 0.0 lon: float = 0.0 class", "complete\", time() - time_start) time_start = time() with open(data_path+'/'+filename+'.@@@') as f: lines =", "match_finder.find_kp_dp(self.img) # def main(): # map_ = image_processing(filename = '26_12_2021_nn') # map_.find_pixel_size() #", "Decimal((abs(x)))/Decimal(self.img.shape[0]) pixel_size_2 = Decimal((abs(y)))/Decimal(self.img.shape[1]) pixel_size = (Decimal(pixel_size_1) + Decimal(pixel_size_2))/Decimal(2) self.pixel_size = pixel_size return", "self.pixel_size = pixel_size return pixel_size def find_pixel_size_by_height(self, height, poi): x = Decimal(np.tanh(poi/2)*2*height) self.pixel_size", "[j for j in sub_str if j] try: sub_str.remove('\\n') except: e = 0", "range(2, len(lines)): sub_str = lines[i].split(' ') sub_str = [j for j in sub_str", "j in sub_str if j] try: sub_str.remove('\\n') except: e = 0 sub_str =", "reshape import rospy import cv2 import numpy as np from PIL import Image", "pixel_size_1 = Decimal((abs(x)))/Decimal(self.img.shape[0]) pixel_size_2 = Decimal((abs(y)))/Decimal(self.img.shape[1]) pixel_size = (Decimal(pixel_size_1) + Decimal(pixel_size_2))/Decimal(2) self.pixel_size =", "# print(\"start job\") try: if file_exists is True: # raster = gdal.Open(data_path+'/'+filename+'.tif') self.img", "cv2 import resize from numpy import reshape import rospy import cv2 import numpy" ]
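# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original repository).
# It exercises find_pixel_size() on a synthetic map so nothing has to exist
# under ~/copa5/map. Only image_processing, img_point and the repo-local
# GeodeticConvert come from the file above; the helper name _demo_pixel_size
# and all four corner coordinates below are invented for the example.
def _demo_pixel_size():
    # Blank 1000x1000 grayscale "map" stands in for the GeoTIFF raster.
    proc = image_processing(img=np.zeros((1000, 1000), dtype=np.uint8))
    # Reference points as the georeference file would provide them:
    # pixel_y, pixel_x, lat, lon (made-up values, roughly a 1 km square).
    proc.main_points = [
        img_point(0, 0, 55.0000, 37.0000),
        img_point(0, 999, 55.0000, 37.0150),
        img_point(999, 0, 55.0090, 37.0000),
        img_point(999, 999, 55.0090, 37.0150),
    ]
    # Expected to print roughly one metre per pixel for these coordinates.
    print('pixel size, m/px:', proc.find_pixel_size())
# ---------------------------------------------------------------------------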
[ "render_template, url_for from markupsafe import Markup from mass_flask_core.models import FileSample, IPSample, DomainSample, URISample,", "return Markup('<i class=\"fa fa-at\"></i>') else: return Markup('<i class=\"fa fa-question\"></i>') def is_file_sample(sample): return isinstance(sample,", "else: return Markup('<i class=\"fa fa-question\"></i>') def is_file_sample(sample): return isinstance(sample, FileSample) def is_executable_binary_sample(sample): return", "class=\"fa fa-globe\"></i>') elif isinstance(sample, URISample): return Markup('<i class=\"fa fa-at\"></i>') else: return Markup('<i class=\"fa", "class=\"fa fa-question\"></i>') def is_file_sample(sample): return isinstance(sample, FileSample) def is_executable_binary_sample(sample): return isinstance(sample, ExecutableBinarySample) def", "webui_blueprint @webui_blueprint.context_processor def sample_processors(): def sample_icon(sample): if isinstance(sample, FileSample): return Markup('<i class=\"fa fa-file\"></i>')", "'Manager' elif user.user_level == UserLevel.USER_LEVEL_PRIVILEGED: return 'Privileged user' elif user.user_level == UserLevel.USER_LEVEL_USER: return", "== UserLevel.USER_LEVEL_ANONYMOUS: return 'Guest user' else: return 'Unknown user level' return dict( user_level=user_level", "class=\"fa fa-at\"></i>') else: return Markup('<i class=\"fa fa-question\"></i>') def is_file_sample(sample): return isinstance(sample, FileSample) def", "user_level=user_level ) @webui_blueprint.context_processor def generic_processors(): def mass_version(): return current_app.version def pagination(paginator): return Markup(render_template('pagination.html',", "**kwargs) return dict( sample_icon=sample_icon, is_file_sample=is_file_sample, is_executable_binary_sample=is_executable_binary_sample, tag_search_link=tag_search_link ) @webui_blueprint.context_processor def user_processors(): def user_level(user):", "import Markup from mass_flask_core.models import FileSample, IPSample, DomainSample, URISample, ExecutableBinarySample, UserLevel from mass_flask_webui.config", "fa-question\"></i>') def is_file_sample(sample): return isinstance(sample, FileSample) def is_executable_binary_sample(sample): return isinstance(sample, ExecutableBinarySample) def tag_search_link(tag):", "isinstance(sample, ExecutableBinarySample) def tag_search_link(tag): kwargs = { 'common-tags': tag, 'submit': 'Submit' } return", "if isinstance(sample, FileSample): return Markup('<i class=\"fa fa-file\"></i>') elif isinstance(sample, IPSample): return Markup('<i class=\"fa", "current_app, render_template, url_for from markupsafe import Markup from mass_flask_core.models import FileSample, IPSample, DomainSample,", "url_for from markupsafe import Markup from mass_flask_core.models import FileSample, IPSample, DomainSample, URISample, ExecutableBinarySample,", "ExecutableBinarySample) def tag_search_link(tag): kwargs = { 'common-tags': tag, 'submit': 'Submit' } return url_for('.sample_search',", "class=\"fa fa-desktop\"></i>') elif isinstance(sample, DomainSample): return Markup('<i class=\"fa fa-globe\"></i>') elif isinstance(sample, URISample): return", "'Unknown user level' return dict( user_level=user_level ) @webui_blueprint.context_processor def generic_processors(): def mass_version(): return", "'submit': 'Submit' } return url_for('.sample_search', **kwargs) return dict( sample_icon=sample_icon, is_file_sample=is_file_sample, is_executable_binary_sample=is_executable_binary_sample, tag_search_link=tag_search_link )", "return 'Normal user' elif 
user.user_level == UserLevel.USER_LEVEL_ANONYMOUS: return 'Guest user' else: return 'Unknown", "tag_search_link(tag): kwargs = { 'common-tags': tag, 'submit': 'Submit' } return url_for('.sample_search', **kwargs) return", "UserLevel.USER_LEVEL_ANONYMOUS: return 'Guest user' else: return 'Unknown user level' return dict( user_level=user_level )", "user level' return dict( user_level=user_level ) @webui_blueprint.context_processor def generic_processors(): def mass_version(): return current_app.version", "= { 'common-tags': tag, 'submit': 'Submit' } return url_for('.sample_search', **kwargs) return dict( sample_icon=sample_icon,", ") @webui_blueprint.context_processor def generic_processors(): def mass_version(): return current_app.version def pagination(paginator): return Markup(render_template('pagination.html', paginator=paginator))", "elif user.user_level == UserLevel.USER_LEVEL_USER: return 'Normal user' elif user.user_level == UserLevel.USER_LEVEL_ANONYMOUS: return 'Guest", "return 'Privileged user' elif user.user_level == UserLevel.USER_LEVEL_USER: return 'Normal user' elif user.user_level ==", "elif isinstance(sample, URISample): return Markup('<i class=\"fa fa-at\"></i>') else: return Markup('<i class=\"fa fa-question\"></i>') def", "user_level(user): if user.user_level == UserLevel.USER_LEVEL_ADMIN: return 'Administrator' elif user.user_level == UserLevel.USER_LEVEL_MANAGER: return 'Manager'", "Markup from mass_flask_core.models import FileSample, IPSample, DomainSample, URISample, ExecutableBinarySample, UserLevel from mass_flask_webui.config import", "FileSample): return Markup('<i class=\"fa fa-file\"></i>') elif isinstance(sample, IPSample): return Markup('<i class=\"fa fa-desktop\"></i>') elif", "dict( sample_icon=sample_icon, is_file_sample=is_file_sample, is_executable_binary_sample=is_executable_binary_sample, tag_search_link=tag_search_link ) @webui_blueprint.context_processor def user_processors(): def user_level(user): if user.user_level", "is_file_sample(sample): return isinstance(sample, FileSample) def is_executable_binary_sample(sample): return isinstance(sample, ExecutableBinarySample) def tag_search_link(tag): kwargs =", "return isinstance(sample, ExecutableBinarySample) def tag_search_link(tag): kwargs = { 'common-tags': tag, 'submit': 'Submit' }", "isinstance(sample, URISample): return Markup('<i class=\"fa fa-at\"></i>') else: return Markup('<i class=\"fa fa-question\"></i>') def is_file_sample(sample):", "from mass_flask_webui.config import webui_blueprint @webui_blueprint.context_processor def sample_processors(): def sample_icon(sample): if isinstance(sample, FileSample): return", "return Markup('<i class=\"fa fa-globe\"></i>') elif isinstance(sample, URISample): return Markup('<i class=\"fa fa-at\"></i>') else: return", "user_processors(): def user_level(user): if user.user_level == UserLevel.USER_LEVEL_ADMIN: return 'Administrator' elif user.user_level == UserLevel.USER_LEVEL_MANAGER:", "sample_icon(sample): if isinstance(sample, FileSample): return Markup('<i class=\"fa fa-file\"></i>') elif isinstance(sample, IPSample): return Markup('<i", "IPSample, DomainSample, URISample, ExecutableBinarySample, UserLevel from mass_flask_webui.config import webui_blueprint @webui_blueprint.context_processor def sample_processors(): def", "'Guest user' else: return 'Unknown user level' return dict( user_level=user_level ) @webui_blueprint.context_processor def", "isinstance(sample, FileSample): return Markup('<i class=\"fa fa-file\"></i>') elif isinstance(sample, 
IPSample): return Markup('<i class=\"fa fa-desktop\"></i>')", "kwargs = { 'common-tags': tag, 'submit': 'Submit' } return url_for('.sample_search', **kwargs) return dict(", "FileSample, IPSample, DomainSample, URISample, ExecutableBinarySample, UserLevel from mass_flask_webui.config import webui_blueprint @webui_blueprint.context_processor def sample_processors():", "user.user_level == UserLevel.USER_LEVEL_ADMIN: return 'Administrator' elif user.user_level == UserLevel.USER_LEVEL_MANAGER: return 'Manager' elif user.user_level", "UserLevel.USER_LEVEL_MANAGER: return 'Manager' elif user.user_level == UserLevel.USER_LEVEL_PRIVILEGED: return 'Privileged user' elif user.user_level ==", "== UserLevel.USER_LEVEL_USER: return 'Normal user' elif user.user_level == UserLevel.USER_LEVEL_ANONYMOUS: return 'Guest user' else:", "def generic_processors(): def mass_version(): return current_app.version def pagination(paginator): return Markup(render_template('pagination.html', paginator=paginator)) return dict(", "return dict( sample_icon=sample_icon, is_file_sample=is_file_sample, is_executable_binary_sample=is_executable_binary_sample, tag_search_link=tag_search_link ) @webui_blueprint.context_processor def user_processors(): def user_level(user): if", "DomainSample): return Markup('<i class=\"fa fa-globe\"></i>') elif isinstance(sample, URISample): return Markup('<i class=\"fa fa-at\"></i>') else:", "from flask import current_app, render_template, url_for from markupsafe import Markup from mass_flask_core.models import", "IPSample): return Markup('<i class=\"fa fa-desktop\"></i>') elif isinstance(sample, DomainSample): return Markup('<i class=\"fa fa-globe\"></i>') elif", "FileSample) def is_executable_binary_sample(sample): return isinstance(sample, ExecutableBinarySample) def tag_search_link(tag): kwargs = { 'common-tags': tag,", "url_for('.sample_search', **kwargs) return dict( sample_icon=sample_icon, is_file_sample=is_file_sample, is_executable_binary_sample=is_executable_binary_sample, tag_search_link=tag_search_link ) @webui_blueprint.context_processor def user_processors(): def", "return 'Administrator' elif user.user_level == UserLevel.USER_LEVEL_MANAGER: return 'Manager' elif user.user_level == UserLevel.USER_LEVEL_PRIVILEGED: return", "mass_version(): return current_app.version def pagination(paginator): return Markup(render_template('pagination.html', paginator=paginator)) return dict( mass_version=mass_version, pagination=pagination )", "ExecutableBinarySample, UserLevel from mass_flask_webui.config import webui_blueprint @webui_blueprint.context_processor def sample_processors(): def sample_icon(sample): if isinstance(sample,", "generic_processors(): def mass_version(): return current_app.version def pagination(paginator): return Markup(render_template('pagination.html', paginator=paginator)) return dict( mass_version=mass_version,", "user.user_level == UserLevel.USER_LEVEL_PRIVILEGED: return 'Privileged user' elif user.user_level == UserLevel.USER_LEVEL_USER: return 'Normal user'", "def tag_search_link(tag): kwargs = { 'common-tags': tag, 'submit': 'Submit' } return url_for('.sample_search', **kwargs)", "fa-desktop\"></i>') elif isinstance(sample, DomainSample): return Markup('<i class=\"fa fa-globe\"></i>') elif isinstance(sample, URISample): return Markup('<i", "elif isinstance(sample, DomainSample): return Markup('<i class=\"fa fa-globe\"></i>') elif isinstance(sample, URISample): return Markup('<i class=\"fa", "DomainSample, URISample, ExecutableBinarySample, UserLevel from 
mass_flask_webui.config import webui_blueprint @webui_blueprint.context_processor def sample_processors(): def sample_icon(sample):", "flask import current_app, render_template, url_for from markupsafe import Markup from mass_flask_core.models import FileSample,", "def mass_version(): return current_app.version def pagination(paginator): return Markup(render_template('pagination.html', paginator=paginator)) return dict( mass_version=mass_version, pagination=pagination", "UserLevel.USER_LEVEL_ADMIN: return 'Administrator' elif user.user_level == UserLevel.USER_LEVEL_MANAGER: return 'Manager' elif user.user_level == UserLevel.USER_LEVEL_PRIVILEGED:", "UserLevel from mass_flask_webui.config import webui_blueprint @webui_blueprint.context_processor def sample_processors(): def sample_icon(sample): if isinstance(sample, FileSample):", "from mass_flask_core.models import FileSample, IPSample, DomainSample, URISample, ExecutableBinarySample, UserLevel from mass_flask_webui.config import webui_blueprint", "sample_processors(): def sample_icon(sample): if isinstance(sample, FileSample): return Markup('<i class=\"fa fa-file\"></i>') elif isinstance(sample, IPSample):", "return url_for('.sample_search', **kwargs) return dict( sample_icon=sample_icon, is_file_sample=is_file_sample, is_executable_binary_sample=is_executable_binary_sample, tag_search_link=tag_search_link ) @webui_blueprint.context_processor def user_processors():", "@webui_blueprint.context_processor def sample_processors(): def sample_icon(sample): if isinstance(sample, FileSample): return Markup('<i class=\"fa fa-file\"></i>') elif", "import current_app, render_template, url_for from markupsafe import Markup from mass_flask_core.models import FileSample, IPSample,", "isinstance(sample, DomainSample): return Markup('<i class=\"fa fa-globe\"></i>') elif isinstance(sample, URISample): return Markup('<i class=\"fa fa-at\"></i>')", "if user.user_level == UserLevel.USER_LEVEL_ADMIN: return 'Administrator' elif user.user_level == UserLevel.USER_LEVEL_MANAGER: return 'Manager' elif", "user' else: return 'Unknown user level' return dict( user_level=user_level ) @webui_blueprint.context_processor def generic_processors():", "user.user_level == UserLevel.USER_LEVEL_USER: return 'Normal user' elif user.user_level == UserLevel.USER_LEVEL_ANONYMOUS: return 'Guest user'", "return 'Unknown user level' return dict( user_level=user_level ) @webui_blueprint.context_processor def generic_processors(): def mass_version():", "sample_icon=sample_icon, is_file_sample=is_file_sample, is_executable_binary_sample=is_executable_binary_sample, tag_search_link=tag_search_link ) @webui_blueprint.context_processor def user_processors(): def user_level(user): if user.user_level ==", "Markup('<i class=\"fa fa-file\"></i>') elif isinstance(sample, IPSample): return Markup('<i class=\"fa fa-desktop\"></i>') elif isinstance(sample, DomainSample):", "fa-at\"></i>') else: return Markup('<i class=\"fa fa-question\"></i>') def is_file_sample(sample): return isinstance(sample, FileSample) def is_executable_binary_sample(sample):", "is_executable_binary_sample=is_executable_binary_sample, tag_search_link=tag_search_link ) @webui_blueprint.context_processor def user_processors(): def user_level(user): if user.user_level == UserLevel.USER_LEVEL_ADMIN: return", "elif user.user_level == UserLevel.USER_LEVEL_ANONYMOUS: return 'Guest user' else: return 'Unknown user level' return", "import FileSample, IPSample, DomainSample, URISample, ExecutableBinarySample, UserLevel from 
mass_flask_webui.config import webui_blueprint @webui_blueprint.context_processor def", "URISample): return Markup('<i class=\"fa fa-at\"></i>') else: return Markup('<i class=\"fa fa-question\"></i>') def is_file_sample(sample): return", "Markup('<i class=\"fa fa-desktop\"></i>') elif isinstance(sample, DomainSample): return Markup('<i class=\"fa fa-globe\"></i>') elif isinstance(sample, URISample):", "== UserLevel.USER_LEVEL_PRIVILEGED: return 'Privileged user' elif user.user_level == UserLevel.USER_LEVEL_USER: return 'Normal user' elif", "return Markup('<i class=\"fa fa-file\"></i>') elif isinstance(sample, IPSample): return Markup('<i class=\"fa fa-desktop\"></i>') elif isinstance(sample,", "mass_flask_webui.config import webui_blueprint @webui_blueprint.context_processor def sample_processors(): def sample_icon(sample): if isinstance(sample, FileSample): return Markup('<i", "class=\"fa fa-file\"></i>') elif isinstance(sample, IPSample): return Markup('<i class=\"fa fa-desktop\"></i>') elif isinstance(sample, DomainSample): return", "Markup('<i class=\"fa fa-globe\"></i>') elif isinstance(sample, URISample): return Markup('<i class=\"fa fa-at\"></i>') else: return Markup('<i", "user.user_level == UserLevel.USER_LEVEL_ANONYMOUS: return 'Guest user' else: return 'Unknown user level' return dict(", "'common-tags': tag, 'submit': 'Submit' } return url_for('.sample_search', **kwargs) return dict( sample_icon=sample_icon, is_file_sample=is_file_sample, is_executable_binary_sample=is_executable_binary_sample,", "def user_level(user): if user.user_level == UserLevel.USER_LEVEL_ADMIN: return 'Administrator' elif user.user_level == UserLevel.USER_LEVEL_MANAGER: return", "return Markup('<i class=\"fa fa-question\"></i>') def is_file_sample(sample): return isinstance(sample, FileSample) def is_executable_binary_sample(sample): return isinstance(sample,", "mass_flask_core.models import FileSample, IPSample, DomainSample, URISample, ExecutableBinarySample, UserLevel from mass_flask_webui.config import webui_blueprint @webui_blueprint.context_processor", "fa-globe\"></i>') elif isinstance(sample, URISample): return Markup('<i class=\"fa fa-at\"></i>') else: return Markup('<i class=\"fa fa-question\"></i>')", "URISample, ExecutableBinarySample, UserLevel from mass_flask_webui.config import webui_blueprint @webui_blueprint.context_processor def sample_processors(): def sample_icon(sample): if", "return dict( user_level=user_level ) @webui_blueprint.context_processor def generic_processors(): def mass_version(): return current_app.version def pagination(paginator):", "'Normal user' elif user.user_level == UserLevel.USER_LEVEL_ANONYMOUS: return 'Guest user' else: return 'Unknown user", "return 'Guest user' else: return 'Unknown user level' return dict( user_level=user_level ) @webui_blueprint.context_processor", "== UserLevel.USER_LEVEL_ADMIN: return 'Administrator' elif user.user_level == UserLevel.USER_LEVEL_MANAGER: return 'Manager' elif user.user_level ==", ") @webui_blueprint.context_processor def user_processors(): def user_level(user): if user.user_level == UserLevel.USER_LEVEL_ADMIN: return 'Administrator' elif", "'Privileged user' elif user.user_level == UserLevel.USER_LEVEL_USER: return 'Normal user' elif user.user_level == UserLevel.USER_LEVEL_ANONYMOUS:", "tag_search_link=tag_search_link ) @webui_blueprint.context_processor def user_processors(): def user_level(user): if user.user_level == UserLevel.USER_LEVEL_ADMIN: return 'Administrator'", "user' elif user.user_level == 
UserLevel.USER_LEVEL_USER: return 'Normal user' elif user.user_level == UserLevel.USER_LEVEL_ANONYMOUS: return", "isinstance(sample, FileSample) def is_executable_binary_sample(sample): return isinstance(sample, ExecutableBinarySample) def tag_search_link(tag): kwargs = { 'common-tags':", "return isinstance(sample, FileSample) def is_executable_binary_sample(sample): return isinstance(sample, ExecutableBinarySample) def tag_search_link(tag): kwargs = {", "{ 'common-tags': tag, 'submit': 'Submit' } return url_for('.sample_search', **kwargs) return dict( sample_icon=sample_icon, is_file_sample=is_file_sample,", "def is_executable_binary_sample(sample): return isinstance(sample, ExecutableBinarySample) def tag_search_link(tag): kwargs = { 'common-tags': tag, 'submit':", "UserLevel.USER_LEVEL_USER: return 'Normal user' elif user.user_level == UserLevel.USER_LEVEL_ANONYMOUS: return 'Guest user' else: return", "'Administrator' elif user.user_level == UserLevel.USER_LEVEL_MANAGER: return 'Manager' elif user.user_level == UserLevel.USER_LEVEL_PRIVILEGED: return 'Privileged", "return 'Manager' elif user.user_level == UserLevel.USER_LEVEL_PRIVILEGED: return 'Privileged user' elif user.user_level == UserLevel.USER_LEVEL_USER:", "tag, 'submit': 'Submit' } return url_for('.sample_search', **kwargs) return dict( sample_icon=sample_icon, is_file_sample=is_file_sample, is_executable_binary_sample=is_executable_binary_sample, tag_search_link=tag_search_link", "user.user_level == UserLevel.USER_LEVEL_MANAGER: return 'Manager' elif user.user_level == UserLevel.USER_LEVEL_PRIVILEGED: return 'Privileged user' elif", "else: return 'Unknown user level' return dict( user_level=user_level ) @webui_blueprint.context_processor def generic_processors(): def", "return Markup('<i class=\"fa fa-desktop\"></i>') elif isinstance(sample, DomainSample): return Markup('<i class=\"fa fa-globe\"></i>') elif isinstance(sample,", "'Submit' } return url_for('.sample_search', **kwargs) return dict( sample_icon=sample_icon, is_file_sample=is_file_sample, is_executable_binary_sample=is_executable_binary_sample, tag_search_link=tag_search_link ) @webui_blueprint.context_processor", "elif user.user_level == UserLevel.USER_LEVEL_PRIVILEGED: return 'Privileged user' elif user.user_level == UserLevel.USER_LEVEL_USER: return 'Normal", "import webui_blueprint @webui_blueprint.context_processor def sample_processors(): def sample_icon(sample): if isinstance(sample, FileSample): return Markup('<i class=\"fa", "== UserLevel.USER_LEVEL_MANAGER: return 'Manager' elif user.user_level == UserLevel.USER_LEVEL_PRIVILEGED: return 'Privileged user' elif user.user_level", "from markupsafe import Markup from mass_flask_core.models import FileSample, IPSample, DomainSample, URISample, ExecutableBinarySample, UserLevel", "def is_file_sample(sample): return isinstance(sample, FileSample) def is_executable_binary_sample(sample): return isinstance(sample, ExecutableBinarySample) def tag_search_link(tag): kwargs", "@webui_blueprint.context_processor def user_processors(): def user_level(user): if user.user_level == UserLevel.USER_LEVEL_ADMIN: return 'Administrator' elif user.user_level", "elif user.user_level == UserLevel.USER_LEVEL_MANAGER: return 'Manager' elif user.user_level == UserLevel.USER_LEVEL_PRIVILEGED: return 'Privileged user'", "def user_processors(): def user_level(user): if user.user_level == UserLevel.USER_LEVEL_ADMIN: return 'Administrator' elif user.user_level ==", "fa-file\"></i>') elif isinstance(sample, IPSample): return Markup('<i 
class=\"fa fa-desktop\"></i>') elif isinstance(sample, DomainSample): return Markup('<i", "user' elif user.user_level == UserLevel.USER_LEVEL_ANONYMOUS: return 'Guest user' else: return 'Unknown user level'", "is_executable_binary_sample(sample): return isinstance(sample, ExecutableBinarySample) def tag_search_link(tag): kwargs = { 'common-tags': tag, 'submit': 'Submit'", "UserLevel.USER_LEVEL_PRIVILEGED: return 'Privileged user' elif user.user_level == UserLevel.USER_LEVEL_USER: return 'Normal user' elif user.user_level", "def sample_processors(): def sample_icon(sample): if isinstance(sample, FileSample): return Markup('<i class=\"fa fa-file\"></i>') elif isinstance(sample,", "dict( user_level=user_level ) @webui_blueprint.context_processor def generic_processors(): def mass_version(): return current_app.version def pagination(paginator): return", "level' return dict( user_level=user_level ) @webui_blueprint.context_processor def generic_processors(): def mass_version(): return current_app.version def", "Markup('<i class=\"fa fa-at\"></i>') else: return Markup('<i class=\"fa fa-question\"></i>') def is_file_sample(sample): return isinstance(sample, FileSample)", "} return url_for('.sample_search', **kwargs) return dict( sample_icon=sample_icon, is_file_sample=is_file_sample, is_executable_binary_sample=is_executable_binary_sample, tag_search_link=tag_search_link ) @webui_blueprint.context_processor def", "def sample_icon(sample): if isinstance(sample, FileSample): return Markup('<i class=\"fa fa-file\"></i>') elif isinstance(sample, IPSample): return", "@webui_blueprint.context_processor def generic_processors(): def mass_version(): return current_app.version def pagination(paginator): return Markup(render_template('pagination.html', paginator=paginator)) return", "isinstance(sample, IPSample): return Markup('<i class=\"fa fa-desktop\"></i>') elif isinstance(sample, DomainSample): return Markup('<i class=\"fa fa-globe\"></i>')", "Markup('<i class=\"fa fa-question\"></i>') def is_file_sample(sample): return isinstance(sample, FileSample) def is_executable_binary_sample(sample): return isinstance(sample, ExecutableBinarySample)", "is_file_sample=is_file_sample, is_executable_binary_sample=is_executable_binary_sample, tag_search_link=tag_search_link ) @webui_blueprint.context_processor def user_processors(): def user_level(user): if user.user_level == UserLevel.USER_LEVEL_ADMIN:", "elif isinstance(sample, IPSample): return Markup('<i class=\"fa fa-desktop\"></i>') elif isinstance(sample, DomainSample): return Markup('<i class=\"fa", "markupsafe import Markup from mass_flask_core.models import FileSample, IPSample, DomainSample, URISample, ExecutableBinarySample, UserLevel from" ]
[ "JsonDifferently.load(Path(directory) / \"a.json\") b = JsonDifferently.load(Path(directory) / \"b.json\") actual_json = str(JsonDifferently(a, b, color=True))", "b, color=True)) with open(Path(directory) / \"actual-json.txt\", \"w\") as f: f.write(actual_json) with open(Path(directory) /", "from differently import JsonDifferently def test() -> None: for directory in scandir(Path() /", "expect = f.read() if actual_json != expect: print(\"ACTUAL:\") print(actual_json) print() print(\"EXPECTED:\") print(expect) assert", "open(Path(directory) / \"actual-json.txt\", \"w\") as f: f.write(actual_json) with open(Path(directory) / \"expect-json.txt\", \"r\") as", "\"expect-json.txt\", \"r\") as f: expect = f.read() if actual_json != expect: print(\"ACTUAL:\") print(actual_json)", "import Path from differently import JsonDifferently def test() -> None: for directory in", "\"actual-json.txt\", \"w\") as f: f.write(actual_json) with open(Path(directory) / \"expect-json.txt\", \"r\") as f: expect", "scandir(Path() / \"tests\" / \"cases\"): a = JsonDifferently.load(Path(directory) / \"a.json\") b = JsonDifferently.load(Path(directory)", "/ \"expect-json.txt\", \"r\") as f: expect = f.read() if actual_json != expect: print(\"ACTUAL:\")", "/ \"cases\"): a = JsonDifferently.load(Path(directory) / \"a.json\") b = JsonDifferently.load(Path(directory) / \"b.json\") actual_json", "b = JsonDifferently.load(Path(directory) / \"b.json\") actual_json = str(JsonDifferently(a, b, color=True)) with open(Path(directory) /", "open(Path(directory) / \"expect-json.txt\", \"r\") as f: expect = f.read() if actual_json != expect:", "Path from differently import JsonDifferently def test() -> None: for directory in scandir(Path()", "<filename>tests/test_cases.py from os import scandir from pathlib import Path from differently import JsonDifferently", "def test() -> None: for directory in scandir(Path() / \"tests\" / \"cases\"): a", "actual_json = str(JsonDifferently(a, b, color=True)) with open(Path(directory) / \"actual-json.txt\", \"w\") as f: f.write(actual_json)", "/ \"a.json\") b = JsonDifferently.load(Path(directory) / \"b.json\") actual_json = str(JsonDifferently(a, b, color=True)) with", "f.write(actual_json) with open(Path(directory) / \"expect-json.txt\", \"r\") as f: expect = f.read() if actual_json", "JsonDifferently.load(Path(directory) / \"b.json\") actual_json = str(JsonDifferently(a, b, color=True)) with open(Path(directory) / \"actual-json.txt\", \"w\")", "import scandir from pathlib import Path from differently import JsonDifferently def test() ->", "-> None: for directory in scandir(Path() / \"tests\" / \"cases\"): a = JsonDifferently.load(Path(directory)", "/ \"b.json\") actual_json = str(JsonDifferently(a, b, color=True)) with open(Path(directory) / \"actual-json.txt\", \"w\") as", "f: f.write(actual_json) with open(Path(directory) / \"expect-json.txt\", \"r\") as f: expect = f.read() if", "= f.read() if actual_json != expect: print(\"ACTUAL:\") print(actual_json) print() print(\"EXPECTED:\") print(expect) assert False", "import JsonDifferently def test() -> None: for directory in scandir(Path() / \"tests\" /", "\"a.json\") b = JsonDifferently.load(Path(directory) / \"b.json\") actual_json = str(JsonDifferently(a, b, color=True)) with open(Path(directory)", "None: for directory in scandir(Path() / \"tests\" / \"cases\"): a = JsonDifferently.load(Path(directory) /", "in scandir(Path() / \"tests\" / \"cases\"): a = JsonDifferently.load(Path(directory) / \"a.json\") b =", "os import scandir from pathlib 
import Path from differently import JsonDifferently def test()", "from os import scandir from pathlib import Path from differently import JsonDifferently def", "\"b.json\") actual_json = str(JsonDifferently(a, b, color=True)) with open(Path(directory) / \"actual-json.txt\", \"w\") as f:", "= JsonDifferently.load(Path(directory) / \"b.json\") actual_json = str(JsonDifferently(a, b, color=True)) with open(Path(directory) / \"actual-json.txt\",", "\"cases\"): a = JsonDifferently.load(Path(directory) / \"a.json\") b = JsonDifferently.load(Path(directory) / \"b.json\") actual_json =", "differently import JsonDifferently def test() -> None: for directory in scandir(Path() / \"tests\"", "as f: f.write(actual_json) with open(Path(directory) / \"expect-json.txt\", \"r\") as f: expect = f.read()", "with open(Path(directory) / \"actual-json.txt\", \"w\") as f: f.write(actual_json) with open(Path(directory) / \"expect-json.txt\", \"r\")", "directory in scandir(Path() / \"tests\" / \"cases\"): a = JsonDifferently.load(Path(directory) / \"a.json\") b", "pathlib import Path from differently import JsonDifferently def test() -> None: for directory", "a = JsonDifferently.load(Path(directory) / \"a.json\") b = JsonDifferently.load(Path(directory) / \"b.json\") actual_json = str(JsonDifferently(a,", "color=True)) with open(Path(directory) / \"actual-json.txt\", \"w\") as f: f.write(actual_json) with open(Path(directory) / \"expect-json.txt\",", "\"tests\" / \"cases\"): a = JsonDifferently.load(Path(directory) / \"a.json\") b = JsonDifferently.load(Path(directory) / \"b.json\")", "/ \"actual-json.txt\", \"w\") as f: f.write(actual_json) with open(Path(directory) / \"expect-json.txt\", \"r\") as f:", "from pathlib import Path from differently import JsonDifferently def test() -> None: for", "for directory in scandir(Path() / \"tests\" / \"cases\"): a = JsonDifferently.load(Path(directory) / \"a.json\")", "with open(Path(directory) / \"expect-json.txt\", \"r\") as f: expect = f.read() if actual_json !=", "scandir from pathlib import Path from differently import JsonDifferently def test() -> None:", "\"w\") as f: f.write(actual_json) with open(Path(directory) / \"expect-json.txt\", \"r\") as f: expect =", "as f: expect = f.read() if actual_json != expect: print(\"ACTUAL:\") print(actual_json) print() print(\"EXPECTED:\")", "= JsonDifferently.load(Path(directory) / \"a.json\") b = JsonDifferently.load(Path(directory) / \"b.json\") actual_json = str(JsonDifferently(a, b,", "\"r\") as f: expect = f.read() if actual_json != expect: print(\"ACTUAL:\") print(actual_json) print()", "str(JsonDifferently(a, b, color=True)) with open(Path(directory) / \"actual-json.txt\", \"w\") as f: f.write(actual_json) with open(Path(directory)", "JsonDifferently def test() -> None: for directory in scandir(Path() / \"tests\" / \"cases\"):", "/ \"tests\" / \"cases\"): a = JsonDifferently.load(Path(directory) / \"a.json\") b = JsonDifferently.load(Path(directory) /", "= str(JsonDifferently(a, b, color=True)) with open(Path(directory) / \"actual-json.txt\", \"w\") as f: f.write(actual_json) with", "f: expect = f.read() if actual_json != expect: print(\"ACTUAL:\") print(actual_json) print() print(\"EXPECTED:\") print(expect)", "test() -> None: for directory in scandir(Path() / \"tests\" / \"cases\"): a =" ]
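(Aside: Path(directory) works in the loop above because os.scandir() yields os.DirEntry objects, which implement os.PathLike on Python 3.6+, so pathlib accepts them directly. A quick standard-library demonstration:)

from os import scandir
from pathlib import Path

for entry in scandir("."):
    p = Path(entry)              # equivalent to Path(entry.path)
    print(p, entry.is_dir())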
[ "117, 116, 101, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32,", "test_by_log_differ(): differ = ld.LogDiffer.from_json('misc/nestest_log.json') nes = nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes) cpu", "114, 101, 99, 116, 44, 88, 41, 32, 116, 101, 115, 116, 115,", "0, 0, 0, 0, 0, 0, 0, ] result = cpu.ppu.memory[0x2000:0x2400] assert expected", "73, 110, 100, 105, 114, 101, 99, 116, 44, 88, 41, 32, 116,", "101, 108, 101, 99, 116, 32, 116, 101, 115, 116, 32, 32, 32,", "32, 70, 108, 97, 103, 32, 116, 101, 115, 116, 115, 32, 32,", "cpu.pc = 0xc000 cpu.status = nc._Status(0x24) while True: info = cpu.dump_registers() op, addr,", "45, 32, 40, 73, 110, 100, 105, 114, 101, 99, 116, 44, 88,", "while True: info = cpu.dump_registers() op, addr, mode = cpu._prepare() info['op'] = op", "test_status(): for i in range(255): s = nc._Status(i) # 第 5 位始终为 1", "overflow=0, negative=0, ) for k, v in expected.items(): r = getattr(s, k) assert", "= cpu.memory[addr] assert expected == result, result def _test_ppu(): nes = nft.prepared_nes() cpu", "+ 0x0100 result = cpu.memory[addr] assert expected == result, result def _test_ppu(): nes", "= 1 result = cpu.pop() assert expected == result, result def test_push_pop2(): mem", "97, 116, 101, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32,", "109, 109, 101, 100, 105, 97, 116, 101, 32, 116, 101, 115, 116,", "32, 32, 32, 32, 45, 45, 32, 83, 116, 97, 99, 107, 32,", "101, 44, 88, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32,", "32, 45, 45, 32, 73, 109, 109, 101, 100, 105, 97, 116, 101,", "== v, r def address_for_log_info(addr): if addr is None: return -1 else: return", "115, 111, 108, 117, 116, 101, 32, 116, 101, 115, 116, 115, 32,", "109, 117, 108, 97, 116, 111, 114, 32, 116, 101, 115, 116, 115,", "expected == result, result def _test_ppu(): nes = nft.prepared_nes() cpu = nc.NesCPU() cpu.load_nes(nes)", "zero=0, interrupt=1, decimal=0, overflow=0, negative=0, ) for k, v in expected.items(): r =", "None) cpu = nc.NesCPU(mem) cpu.push(1) cpu.push(2) cpu.push(3) cpu.push(4) expected = [4, 3, 2,", "114, 116, 58, 32, 114, 117, 110, 32, 116, 101, 115, 116, 32,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45,", "assert expected == result, result def test_push_pop2(): mem = nc.Memory(None, None) cpu =", "99, 116, 44, 88, 41, 32, 116, 101, 115, 116, 115, 32, 32,", "(i | 0b00100000), s.value def test_status2(): s = nc._Status(0x24) expected = dict( carry=0,", "105, 114, 101, 99, 116, 44, 88, 41, 32, 116, 101, 115, 116,", "101, 99, 116, 58, 32, 73, 110, 118, 97, 108, 105, 100, 32,", "32 * 1024: expected = nes.prg_rom else: expected = nes.prg_rom * 2 result", "expected == result, result def test_push(): \"\"\" addr = s + 0x0100 \"\"\"", "32, 32, 32, 45, 45, 32, 73, 109, 109, 101, 100, 105, 97,", "107, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32,", "32, 73, 109, 109, 101, 100, 105, 97, 116, 101, 32, 116, 101,", "32, 32, 32, 32, 45, 45, 32, 70, 108, 97, 103, 32, 116,", "32, 32, 45, 45, 32, 73, 109, 109, 101, 100, 105, 97, 116,", "100, 32, 111, 112, 115, 33, 32, 32, 32, 32, 32, 32, 32,", "1 assert s.value == (i | 0b00100000), s.value def test_status2(): s = nc._Status(0x24)", "nc.NesCPU(mem) cpu.push(1) cpu.push(2) cpu.push(3) cpu.push(4) expected = [4, 3, 2, 1] result =", "if len(nes.prg_rom) == 32 * 1024: expected = nes.prg_rom else: expected = nes.prg_rom", "assert s._ignore == 1 assert s.value == (i | 0b00100000), s.value def test_status2():", "expected = [ 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "32, 32, 45, 45, 32, 90, 101, 114, 111, 112, 97, 103, 
101,", "result, result def _test_ppu(): nes = nft.prepared_nes() cpu = nc.NesCPU() cpu.load_nes(nes) cpu.interrupt('reset') for", "= nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes) cpu = nc.NesCPU(mem) # nestest.nes 所需的特殊初始化", ") for k, v in expected.items(): r = getattr(s, k) assert r ==", "= nc._Status(0x24) expected = dict( carry=0, zero=0, interrupt=1, decimal=0, overflow=0, negative=0, ) for", "32, 32, 45, 45, 32, 65, 99, 99, 117, 109, 117, 108, 97,", "[cpu.pop() for _ in range(4)] assert expected == result, result def test_push(): \"\"\"", "32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 73, 109,", "_ in range(4)] assert expected == result, result def test_push(): \"\"\" addr =", "116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32, 32, 32,", "116, 101, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32,", "= nes.prg_rom * 2 result = mem.prg_rom assert expected == result, result def", "45, 32, 40, 73, 110, 100, 105, 114, 101, 99, 116, 41, 44,", "None) mem.load_nes(nes) cpu = nc.NesCPU(mem) # nestest.nes 所需的特殊初始化 cpu.pc = 0xc000 cpu.status =", "negative=0, ) for k, v in expected.items(): r = getattr(s, k) assert r", "41, 44, 89, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32,", "sp + 0x0100 result = cpu.memory[addr] assert expected == result, result def _test_ppu():", "90, 101, 114, 111, 112, 97, 103, 101, 44, 88, 32, 116, 101,", "32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 90, 101, 114,", "41, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32,", "ld.LogDiffer.from_json('misc/nestest_log.json') nes = nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes) cpu = nc.NesCPU(mem) #", "32, 32, 32, 32, 85, 112, 47, 68, 111, 119, 110, 58, 32,", "== result, result def test_status(): for i in range(255): s = nc._Status(i) #", "_ in range(20000): cpu.execute() expected = [ 32, 32, 32, 32, 32, 32,", "| 0b00100000), s.value def test_status2(): s = nc._Status(0x24) expected = dict( carry=0, zero=0,", "def _test_ppu(): nes = nft.prepared_nes() cpu = nc.NesCPU() cpu.load_nes(nes) cpu.interrupt('reset') for _ in", "mem.prg_rom assert expected == result, result def test_status(): for i in range(255): s", "= 0xc000 cpu.status = nc._Status(0x24) while True: info = cpu.dump_registers() op, addr, mode", "32, 45, 45, 32, 70, 108, 97, 103, 32, 116, 101, 115, 116,", "mode) def test_push_pop1(): mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1) expected =", "45, 45, 32, 82, 117, 110, 32, 97, 108, 108, 32, 116, 101,", "cpu.push(4) expected = [4, 3, 2, 1] result = [cpu.pop() for _ in", "40, 73, 110, 100, 105, 114, 101, 99, 116, 44, 88, 41, 32,", "s._ignore == 1 assert s.value == (i | 0b00100000), s.value def test_status2(): s", "as nft def test_load_nes(): nes = nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes) if", "test_status2(): s = nc._Status(0x24) expected = dict( carry=0, zero=0, interrupt=1, decimal=0, overflow=0, negative=0,", "for i in range(255): s = nc._Status(i) # 第 5 位始终为 1 assert", "nes = nft.prepared_nes() cpu = nc.NesCPU() cpu.load_nes(nes) cpu.interrupt('reset') for _ in range(20000): cpu.execute()", "in range(4)] assert expected == result, result def test_push(): \"\"\" addr = s", "45, 45, 32, 90, 101, 114, 111, 112, 97, 103, 101, 32, 116,", "expected = dict( carry=0, zero=0, interrupt=1, decimal=0, overflow=0, negative=0, ) for k, v", "112, 97, 103, 101, 32, 116, 101, 115, 116, 115, 32, 32, 32,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 83, 101, 108,", "1 result = cpu.pop() assert expected == result, result def test_push_pop2(): mem =", "116, 101, 115, 
116, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "except ld.AllTestsPassed: break cpu._execute(op, addr, mode) def test_push_pop1(): mem = nc.Memory(None, None) cpu", "117, 109, 117, 108, 97, 116, 111, 114, 32, 116, 101, 115, 116,", "73, 110, 100, 105, 114, 101, 99, 116, 41, 44, 89, 32, 116,", "try: differ.diff(info) except ld.AllTestsPassed: break cpu._execute(op, addr, mode) def test_push_pop1(): mem = nc.Memory(None,", "nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes) if len(nes.prg_rom) == 32 * 1024: expected", "32, 32, 32, 32, 32, 45, 45, 32, 65, 99, 99, 117, 109,", "101, 99, 116, 32, 116, 101, 115, 116, 32, 32, 32, 32, 32,", "range(20000): cpu.execute() expected = [ 32, 32, 32, 32, 32, 32, 32, 32,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 0,", "= ld.LogDiffer.from_json('misc/nestest_log.json') nes = nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes) cpu = nc.NesCPU(mem)", "= cpu.sp cpu.push(1) expected = 1 addr = sp + 0x0100 result =", "110, 32, 97, 108, 108, 32, 116, 101, 115, 116, 115, 32, 32,", "32, 32, 32, 32, 32, 32, 45, 45, 32, 65, 99, 99, 117,", "111, 112, 97, 103, 101, 32, 116, 101, 115, 116, 115, 32, 32,", "= nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes) if len(nes.prg_rom) == 32 * 1024:", "0x0100 result = cpu.memory[addr] assert expected == result, result def _test_ppu(): nes =", "32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32, 32,", "def test_push_pop1(): mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1) expected = 1", "45, 32, 70, 108, 97, 103, 32, 116, 101, 115, 116, 115, 32,", "interrupt=1, decimal=0, overflow=0, negative=0, ) for k, v in expected.items(): r = getattr(s,", "101, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32,", "32, 32, 32, 32, 32, 45, 45, 32, 65, 98, 115, 111, 108,", "\"\"\" addr = s + 0x0100 \"\"\" mem = nc.Memory(None, None) cpu =", "op info['address'] = address_for_log_info(addr) try: differ.diff(info) except ld.AllTestsPassed: break cpu._execute(op, addr, mode) def", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 85, 112, 47,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 70,", "105, 101, 100, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32,", "nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1) expected = 1 result = cpu.pop() assert", "1024: expected = nes.prg_rom else: expected = nes.prg_rom * 2 result = mem.prg_rom", "def test_status(): for i in range(255): s = nc._Status(i) # 第 5 位始终为", "32, 90, 101, 114, 111, 112, 97, 103, 101, 44, 88, 32, 116,", "108, 117, 116, 101, 32, 116, 101, 115, 116, 115, 32, 32, 32,", "32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 40, 73, 110,", "32, 40, 73, 110, 100, 105, 114, 101, 99, 116, 41, 44, 89,", "108, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32,", "32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 83, 116, 97,", "in expected.items(): r = getattr(s, k) assert r == v, r def address_for_log_info(addr):", "None) mem.load_nes(nes) if len(nes.prg_rom) == 32 * 1024: expected = nes.prg_rom else: expected", "= cpu.pop() assert expected == result, result def test_push_pop2(): mem = nc.Memory(None, None)", "range(255): s = nc._Status(i) # 第 5 位始终为 1 assert s._ignore == 1", "result, result def test_status(): for i in range(255): s = nc._Status(i) # 第", "is None: return -1 else: return addr def test_by_log_differ(): differ = ld.LogDiffer.from_json('misc/nestest_log.json') nes", "118, 97, 108, 105, 100, 32, 111, 112, 115, 33, 32, 32, 32,", "109, 112, 108, 105, 101, 100, 32, 116, 101, 115, 116, 115, 32,", "101, 108, 101, 99, 116, 58, 32, 73, 110, 118, 97, 108, 
105,", "45, 45, 32, 66, 114, 97, 110, 99, 104, 32, 116, 101, 115,", "0, 0, 0, 0, 0, ] result = cpu.ppu.memory[0x2000:0x2400] assert expected == result,", "log_differ as ld import nes_cpu as nc import nes_file_test as nft def test_load_nes():", "op, addr, mode = cpu._prepare() info['op'] = op info['address'] = address_for_log_info(addr) try: differ.diff(info)", "32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 66, 114, 97,", "58, 32, 114, 117, 110, 32, 116, 101, 115, 116, 32, 32, 32,", "# 第 5 位始终为 1 assert s._ignore == 1 assert s.value == (i", "32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 65, 98,", "32, 32, 32, 32, 32, 45, 45, 32, 82, 117, 110, 32, 97,", "cpu.push(3) cpu.push(4) expected = [4, 3, 2, 1] result = [cpu.pop() for _", "= nc.Memory(None, None) cpu = nc.NesCPU(mem) sp = cpu.sp cpu.push(1) expected = 1", "32, 32, 83, 116, 97, 114, 116, 58, 32, 114, 117, 110, 32,", "addr is None: return -1 else: return addr def test_by_log_differ(): differ = ld.LogDiffer.from_json('misc/nestest_log.json')", "== result, result def test_push_pop2(): mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1)", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45,", "32, 97, 108, 108, 32, 116, 101, 115, 116, 115, 32, 32, 32,", "32, 32, 32, 32, 32, 32, 45, 45, 32, 83, 116, 97, 99,", "32, 32, 32, 32, 45, 45, 32, 65, 99, 99, 117, 109, 117,", "58, 32, 73, 110, 118, 97, 108, 105, 100, 32, 111, 112, 115,", "_test_ppu(): nes = nft.prepared_nes() cpu = nc.NesCPU() cpu.load_nes(nes) cpu.interrupt('reset') for _ in range(20000):", "98, 115, 111, 108, 117, 116, 101, 44, 88, 32, 116, 101, 115,", "115, 116, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "= nft.prepared_nes() cpu = nc.NesCPU() cpu.load_nes(nes) cpu.interrupt('reset') for _ in range(20000): cpu.execute() expected", "cpu.push(1) cpu.push(2) cpu.push(3) cpu.push(4) expected = [4, 3, 2, 1] result = [cpu.pop()", "111, 112, 115, 33, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "mem = nc.Memory(None, None) mem.load_nes(nes) cpu = nc.NesCPU(mem) # nestest.nes 所需的特殊初始化 cpu.pc =", "as nc import nes_file_test as nft def test_load_nes(): nes = nft.prepared_nes() mem =", "test_push(): \"\"\" addr = s + 0x0100 \"\"\" mem = nc.Memory(None, None) cpu", "所需的特殊初始化 cpu.pc = 0xc000 cpu.status = nc._Status(0x24) while True: info = cpu.dump_registers() op,", "nc.Memory(None, None) cpu = nc.NesCPU(mem) sp = cpu.sp cpu.push(1) expected = 1 addr", "99, 116, 32, 116, 101, 115, 116, 32, 32, 32, 32, 32, 32,", "cpu.execute() expected = [ 32, 32, 32, 32, 32, 32, 32, 32, 32,", "44, 88, 41, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32,", "range(4)] assert expected == result, result def test_push(): \"\"\" addr = s +", "112, 115, 33, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 73, 109, 112,", "in range(20000): cpu.execute() expected = [ 32, 32, 32, 32, 32, 32, 32,", "nc.NesCPU(mem) # nestest.nes 所需的特殊初始化 cpu.pc = 0xc000 cpu.status = nc._Status(0x24) while True: info", "114, 101, 99, 116, 41, 44, 89, 32, 116, 101, 115, 116, 115,", "for _ in range(4)] assert expected == result, result def test_push(): \"\"\" addr", "address_for_log_info(addr): if addr is None: return -1 else: return addr def test_by_log_differ(): differ", "32, 32, 32, 32, 45, 45, 32, 73, 109, 109, 101, 100, 105,", "mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1) cpu.push(2) cpu.push(3) cpu.push(4) expected =", "test_load_nes(): nes = nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes) if len(nes.prg_rom) == 32", "32, 32, 32, 32, 32, 45, 45, 32, 40, 73, 110, 100, 105,", 
"decimal=0, overflow=0, negative=0, ) for k, v in expected.items(): r = getattr(s, k)", "108, 105, 101, 100, 32, 116, 101, 115, 116, 115, 32, 32, 32,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 90,", "85, 112, 47, 68, 111, 119, 110, 58, 32, 115, 101, 108, 101,", "100, 105, 114, 101, 99, 116, 41, 44, 89, 32, 116, 101, 115,", "result = cpu.memory[addr] assert expected == result, result def _test_ppu(): nes = nft.prepared_nes()", "32, 32, 32, 32, 45, 45, 32, 65, 98, 115, 111, 108, 117,", "104, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32,", "0, 0, 0, 0, 0, 0, 0, 0, 0, ] result = cpu.ppu.memory[0x2000:0x2400]", "== 32 * 1024: expected = nes.prg_rom else: expected = nes.prg_rom * 2", "32, 73, 109, 112, 108, 105, 101, 100, 32, 116, 101, 115, 116,", "83, 116, 97, 99, 107, 32, 116, 101, 115, 116, 115, 32, 32,", "nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1) cpu.push(2) cpu.push(3) cpu.push(4) expected = [4, 3,", "98, 115, 111, 108, 117, 116, 101, 32, 116, 101, 115, 116, 115,", "83, 101, 108, 101, 99, 116, 58, 32, 73, 110, 118, 97, 108,", "45, 32, 90, 101, 114, 111, 112, 97, 103, 101, 32, 116, 101,", "45, 45, 32, 65, 98, 115, 111, 108, 117, 116, 101, 44, 88,", "116, 32, 116, 101, 115, 116, 32, 32, 32, 32, 32, 32, 32,", "101, 115, 116, 115, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "101, 100, 105, 97, 116, 101, 32, 116, 101, 115, 116, 115, 32,", "r = getattr(s, k) assert r == v, r def address_for_log_info(addr): if addr", "32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 65,", "expected = [4, 3, 2, 1] result = [cpu.pop() for _ in range(4)]", "32, 82, 117, 110, 32, 97, 108, 108, 32, 116, 101, 115, 116,", "45, 45, 32, 65, 98, 115, 111, 108, 117, 116, 101, 44, 89,", "32, 65, 99, 99, 117, 109, 117, 108, 97, 116, 111, 114, 32,", "else: return addr def test_by_log_differ(): differ = ld.LogDiffer.from_json('misc/nestest_log.json') nes = nft.prepared_nes() mem =", "result def test_push(): \"\"\" addr = s + 0x0100 \"\"\" mem = nc.Memory(None,", "105, 114, 101, 99, 116, 41, 44, 89, 32, 116, 101, 115, 116,", "32, 32, 45, 45, 32, 83, 116, 97, 99, 107, 32, 116, 101,", "= nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1) cpu.push(2) cpu.push(3) cpu.push(4) expected = [4,", "= nc._Status(i) # 第 5 位始终为 1 assert s._ignore == 1 assert s.value", "114, 111, 112, 97, 103, 101, 44, 88, 32, 116, 101, 115, 116,", "111, 108, 117, 116, 101, 32, 116, 101, 115, 116, 115, 32, 32,", "= [ 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32,", "45, 32, 73, 109, 109, 101, 100, 105, 97, 116, 101, 32, 116,", "expected == result, result def test_push_pop2(): mem = nc.Memory(None, None) cpu = nc.NesCPU(mem)", "45, 45, 32, 40, 73, 110, 100, 105, 114, 101, 99, 116, 41,", "-*- import log_differ as ld import nes_cpu as nc import nes_file_test as nft", "len(nes.prg_rom) == 32 * 1024: expected = nes.prg_rom else: expected = nes.prg_rom *", "= sp + 0x0100 result = cpu.memory[addr] assert expected == result, result def", "32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 82,", "32, 32, 32, 32, 32, 32, 32, 32, 83, 116, 97, 114, 116,", "v in expected.items(): r = getattr(s, k) assert r == v, r def", "32, 32, 32, 45, 45, 32, 65, 98, 115, 111, 108, 117, 116,", "32, 32, 32, 32, 32, 83, 101, 108, 101, 99, 116, 58, 32,", "-1 else: return addr def test_by_log_differ(): differ = ld.LogDiffer.from_json('misc/nestest_log.json') nes = nft.prepared_nes() mem", "32, 32, 45, 45, 32, 73, 109, 112, 108, 105, 101, 100, 32,", "expected = nes.prg_rom else: expected = nes.prg_rom * 2 result = 
mem.prg_rom assert", "32, 32, 45, 45, 32, 40, 73, 110, 100, 105, 114, 101, 99,", "32, 32, 32, 32, 32, 32, 45, 45, 32, 40, 73, 110, 100,", "32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "44, 89, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32,", "32, 32, 32, 83, 101, 108, 101, 99, 116, 58, 32, 73, 110,", "115, 101, 108, 101, 99, 116, 32, 116, 101, 115, 116, 32, 32,", "32, 65, 98, 115, 111, 108, 117, 116, 101, 44, 89, 32, 116,", "111, 112, 97, 103, 101, 44, 88, 32, 116, 101, 115, 116, 115,", "45, 32, 65, 98, 115, 111, 108, 117, 116, 101, 32, 116, 101,", "nc.Memory(None, None) mem.load_nes(nes) if len(nes.prg_rom) == 32 * 1024: expected = nes.prg_rom else:", "115, 116, 115, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "0, 0, 0, 0, 0, 0, 0, 0, ] result = cpu.ppu.memory[0x2000:0x2400] assert", "88, 41, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32,", "32, 115, 101, 108, 101, 99, 116, 32, 116, 101, 115, 116, 32,", "32, 32, 32, 32, 32, 32, 85, 112, 47, 68, 111, 119, 110,", "s.value def test_status2(): s = nc._Status(0x24) expected = dict( carry=0, zero=0, interrupt=1, decimal=0,", "dict( carry=0, zero=0, interrupt=1, decimal=0, overflow=0, negative=0, ) for k, v in expected.items():", "82, 117, 110, 32, 97, 108, 108, 32, 116, 101, 115, 116, 115,", "differ = ld.LogDiffer.from_json('misc/nestest_log.json') nes = nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes) cpu =", "45, 45, 32, 70, 108, 97, 103, 32, 116, 101, 115, 116, 115,", "= nc.NesCPU(mem) sp = cpu.sp cpu.push(1) expected = 1 addr = sp +", "32, 32, 32, 45, 45, 32, 83, 116, 97, 99, 107, 32, 116,", "expected == result, result def test_status(): for i in range(255): s = nc._Status(i)", "nc.Memory(None, None) mem.load_nes(nes) cpu = nc.NesCPU(mem) # nestest.nes 所需的特殊初始化 cpu.pc = 0xc000 cpu.status", "32, 32, 32, 32, 32, 83, 116, 97, 114, 116, 58, 32, 114,", "101, 100, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32,", "result def test_status(): for i in range(255): s = nc._Status(i) # 第 5", "110, 99, 104, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32,", "coding: utf-8 -*- import log_differ as ld import nes_cpu as nc import nes_file_test", "32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 82, 117, 110,", "32, 45, 45, 32, 90, 101, 114, 111, 112, 97, 103, 101, 44,", "sp = cpu.sp cpu.push(1) expected = 1 addr = sp + 0x0100 result", "cpu.interrupt('reset') for _ in range(20000): cpu.execute() expected = [ 32, 32, 32, 32,", "32, 32, 32, 45, 45, 32, 90, 101, 114, 111, 112, 97, 103,", "32, 32, 85, 112, 47, 68, 111, 119, 110, 58, 32, 115, 101,", "32, 45, 45, 32, 40, 73, 110, 100, 105, 114, 101, 99, 116,", "mem = nc.Memory(None, None) mem.load_nes(nes) if len(nes.prg_rom) == 32 * 1024: expected =", "32, 32, 32, 32, 83, 116, 97, 114, 116, 58, 32, 114, 117,", "# -*- coding: utf-8 -*- import log_differ as ld import nes_cpu as nc", "= cpu.dump_registers() op, addr, mode = cpu._prepare() info['op'] = op info['address'] = address_for_log_info(addr)", "assert expected == result, result def test_push(): \"\"\" addr = s + 0x0100", "89, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32,", "result def _test_ppu(): nes = nft.prepared_nes() cpu = nc.NesCPU() cpu.load_nes(nes) cpu.interrupt('reset') for _", "return addr def test_by_log_differ(): differ = ld.LogDiffer.from_json('misc/nestest_log.json') nes = nft.prepared_nes() mem = nc.Memory(None,", "32, 45, 45, 32, 65, 99, 99, 117, 109, 117, 108, 97, 116,", "= nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1) expected = 1 result = cpu.pop()", "nes = nft.prepared_nes() mem = nc.Memory(None, None) 
mem.load_nes(nes) cpu = nc.NesCPU(mem) # nestest.nes", "101, 114, 111, 112, 97, 103, 101, 44, 88, 32, 116, 101, 115,", "import log_differ as ld import nes_cpu as nc import nes_file_test as nft def", "103, 101, 44, 88, 32, 116, 101, 115, 116, 115, 32, 32, 32,", "32, 65, 98, 115, 111, 108, 117, 116, 101, 44, 88, 32, 116,", "in range(255): s = nc._Status(i) # 第 5 位始终为 1 assert s._ignore ==", "32, 32, 32, 32, 32, 32, 45, 45, 32, 82, 117, 110, 32,", "100, 105, 114, 101, 99, 116, 44, 88, 41, 32, 116, 101, 115,", "for k, v in expected.items(): r = getattr(s, k) assert r == v,", "result, result def test_push_pop2(): mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1) cpu.push(2)", "32, 40, 73, 110, 100, 105, 114, 101, 99, 116, 44, 88, 41,", "32, 32, 32, 32, 32, 32, 45, 45, 32, 66, 114, 97, 110,", "32, 32, 32, 32, 45, 45, 32, 73, 109, 112, 108, 105, 101,", "mem.load_nes(nes) if len(nes.prg_rom) == 32 * 1024: expected = nes.prg_rom else: expected =", "1 addr = sp + 0x0100 result = cpu.memory[addr] assert expected == result,", "第 5 位始终为 1 assert s._ignore == 1 assert s.value == (i |", "32, 114, 117, 110, 32, 116, 101, 115, 116, 32, 32, 32, 32,", "32, 66, 114, 97, 110, 99, 104, 32, 116, 101, 115, 116, 115,", "32, 83, 116, 97, 114, 116, 58, 32, 114, 117, 110, 32, 116,", "32, 32, 32, 32, 32, 32, 45, 45, 32, 65, 98, 115, 111,", "65, 98, 115, 111, 108, 117, 116, 101, 44, 88, 32, 116, 101,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 40,", "108, 117, 116, 101, 44, 89, 32, 116, 101, 115, 116, 115, 32,", "cpu = nc.NesCPU(mem) # nestest.nes 所需的特殊初始化 cpu.pc = 0xc000 cpu.status = nc._Status(0x24) while", "info['op'] = op info['address'] = address_for_log_info(addr) try: differ.diff(info) except ld.AllTestsPassed: break cpu._execute(op, addr,", "99, 99, 117, 109, 117, 108, 97, 116, 111, 114, 32, 116, 101,", "expected = nes.prg_rom * 2 result = mem.prg_rom assert expected == result, result", "45, 32, 82, 117, 110, 32, 97, 108, 108, 32, 116, 101, 115,", "65, 98, 115, 111, 108, 117, 116, 101, 32, 116, 101, 115, 116,", "114, 97, 110, 99, 104, 32, 116, 101, 115, 116, 115, 32, 32,", "45, 45, 32, 90, 101, 114, 111, 112, 97, 103, 101, 44, 88,", "97, 116, 111, 114, 32, 116, 101, 115, 116, 115, 32, 32, 32,", "32, 32, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "0, 0, 0, 0, ] result = cpu.ppu.memory[0x2000:0x2400] assert expected == result, result", "= nc.NesCPU(mem) cpu.push(1) cpu.push(2) cpu.push(3) cpu.push(4) expected = [4, 3, 2, 1] result", "32, 32, 32, 32, 32, 32, 83, 116, 97, 114, 116, 58, 32,", "32, 32, 32, 32, 32, 45, 45, 32, 73, 109, 109, 101, 100,", "108, 97, 116, 111, 114, 32, 116, 101, 115, 116, 115, 32, 32,", "112, 97, 103, 101, 44, 88, 32, 116, 101, 115, 116, 115, 32,", "0, 0, 0, 0, 0, 0, ] result = cpu.ppu.memory[0x2000:0x2400] assert expected ==", "def test_status2(): s = nc._Status(0x24) expected = dict( carry=0, zero=0, interrupt=1, decimal=0, overflow=0,", "+ 0x0100 \"\"\" mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) sp = cpu.sp", "cpu.dump_registers() op, addr, mode = cpu._prepare() info['op'] = op info['address'] = address_for_log_info(addr) try:", "32, 32, 32, 32, 32, 32, 32, 85, 112, 47, 68, 111, 119,", "109, 101, 100, 105, 97, 116, 101, 32, 116, 101, 115, 116, 115,", "116, 101, 44, 89, 32, 116, 101, 115, 116, 115, 32, 32, 32,", "result, result def test_push(): \"\"\" addr = s + 0x0100 \"\"\" mem =", "assert expected == result, result def _test_ppu(): nes = nft.prepared_nes() cpu = nc.NesCPU()", "32, 32, 32, 45, 45, 32, 65, 99, 99, 117, 109, 117, 108,", "32, 45, 45, 32, 66, 114, 97, 
110, 99, 104, 32, 116, 101,", "= nc.NesCPU(mem) cpu.push(1) expected = 1 result = cpu.pop() assert expected == result,", "0xc000 cpu.status = nc._Status(0x24) while True: info = cpu.dump_registers() op, addr, mode =", "45, 32, 65, 99, 99, 117, 109, 117, 108, 97, 116, 111, 114,", "83, 116, 97, 114, 116, 58, 32, 114, 117, 110, 32, 116, 101,", "utf-8 -*- import log_differ as ld import nes_cpu as nc import nes_file_test as", "110, 58, 32, 115, 101, 108, 101, 99, 116, 32, 116, 101, 115,", "cpu.sp cpu.push(1) expected = 1 addr = sp + 0x0100 result = cpu.memory[addr]", "32, 32, 45, 45, 32, 70, 108, 97, 103, 32, 116, 101, 115,", "65, 99, 99, 117, 109, 117, 108, 97, 116, 111, 114, 32, 116,", "100, 105, 97, 116, 101, 32, 116, 101, 115, 116, 115, 32, 32,", "= [4, 3, 2, 1] result = [cpu.pop() for _ in range(4)] assert", "addr, mode = cpu._prepare() info['op'] = op info['address'] = address_for_log_info(addr) try: differ.diff(info) except", "nft.prepared_nes() cpu = nc.NesCPU() cpu.load_nes(nes) cpu.interrupt('reset') for _ in range(20000): cpu.execute() expected =", "differ.diff(info) except ld.AllTestsPassed: break cpu._execute(op, addr, mode) def test_push_pop1(): mem = nc.Memory(None, None)", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 0, 0,", "mem.load_nes(nes) cpu = nc.NesCPU(mem) # nestest.nes 所需的特殊初始化 cpu.pc = 0xc000 cpu.status = nc._Status(0x24)", "== result, result def _test_ppu(): nes = nft.prepared_nes() cpu = nc.NesCPU() cpu.load_nes(nes) cpu.interrupt('reset')", "100, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32,", "66, 114, 97, 110, 99, 104, 32, 116, 101, 115, 116, 115, 32,", "32, 45, 45, 32, 65, 98, 115, 111, 108, 117, 116, 101, 32,", "45, 45, 32, 65, 98, 115, 111, 108, 117, 116, 101, 32, 116,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 83,", "32, 32, 32, 32, 32, 32, 32, 32, 83, 101, 108, 101, 99,", "cpu.pop() assert expected == result, result def test_push_pop2(): mem = nc.Memory(None, None) cpu", "address_for_log_info(addr) try: differ.diff(info) except ld.AllTestsPassed: break cpu._execute(op, addr, mode) def test_push_pop1(): mem =", "[ 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "r == v, r def address_for_log_info(addr): if addr is None: return -1 else:", "32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 66, 114,", "110, 32, 116, 101, 115, 116, 32, 32, 32, 32, 32, 32, 32,", "32, 32, 32, 32, 32, 32, 83, 101, 108, 101, 99, 116, 58,", "45, 45, 32, 65, 99, 99, 117, 109, 117, 108, 97, 116, 111,", "= mem.prg_rom assert expected == result, result def test_status(): for i in range(255):", "97, 114, 116, 58, 32, 114, 117, 110, 32, 116, 101, 115, 116,", "32, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "116, 101, 44, 88, 32, 116, 101, 115, 116, 115, 32, 32, 32,", "= op info['address'] = address_for_log_info(addr) try: differ.diff(info) except ld.AllTestsPassed: break cpu._execute(op, addr, mode)", "32, 32, 32, 83, 116, 97, 114, 116, 58, 32, 114, 117, 110,", "32, 83, 101, 108, 101, 99, 116, 58, 32, 73, 110, 118, 97,", "info['address'] = address_for_log_info(addr) try: differ.diff(info) except ld.AllTestsPassed: break cpu._execute(op, addr, mode) def test_push_pop1():", "32, 32, 32, 32, 32, 32, 32, 32, 0, 0, 0, 0, 0,", "101, 99, 116, 41, 44, 89, 32, 116, 101, 115, 116, 115, 32,", "99, 116, 58, 32, 73, 110, 118, 97, 108, 105, 100, 32, 111,", "105, 97, 116, 101, 32, 116, 101, 115, 116, 115, 32, 32, 32,", "cpu.load_nes(nes) cpu.interrupt('reset') for _ in range(20000): cpu.execute() expected = [ 32, 32, 32,", "114, 117, 110, 32, 116, 101, 115, 116, 32, 32, 32, 32, 32,", "cpu.push(2) 
cpu.push(3) cpu.push(4) expected = [4, 3, 2, 1] result = [cpu.pop() for", "assert r == v, r def address_for_log_info(addr): if addr is None: return -1", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 0, 0, 0,", "break cpu._execute(op, addr, mode) def test_push_pop1(): mem = nc.Memory(None, None) cpu = nc.NesCPU(mem)", "= 1 addr = sp + 0x0100 result = cpu.memory[addr] assert expected ==", "s.value == (i | 0b00100000), s.value def test_status2(): s = nc._Status(0x24) expected =", "103, 101, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32,", "32, 32, 32, 32, 32, 32, 32, 32, 85, 112, 47, 68, 111,", "116, 97, 114, 116, 58, 32, 114, 117, 110, 32, 116, 101, 115,", "73, 110, 118, 97, 108, 105, 100, 32, 111, 112, 115, 33, 32,", "108, 105, 100, 32, 111, 112, 115, 33, 32, 32, 32, 32, 32,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ] result =", "cpu._prepare() info['op'] = op info['address'] = address_for_log_info(addr) try: differ.diff(info) except ld.AllTestsPassed: break cpu._execute(op,", "cpu.push(1) expected = 1 result = cpu.pop() assert expected == result, result def", "117, 116, 101, 44, 89, 32, 116, 101, 115, 116, 115, 32, 32,", "5 位始终为 1 assert s._ignore == 1 assert s.value == (i | 0b00100000),", "32, 32, 32, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0,", "s = nc._Status(i) # 第 5 位始终为 1 assert s._ignore == 1 assert", "44, 88, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32,", "32, 32, 45, 45, 32, 82, 117, 110, 32, 97, 108, 108, 32,", "112, 108, 105, 101, 100, 32, 116, 101, 115, 116, 115, 32, 32,", "addr = s + 0x0100 \"\"\" mem = nc.Memory(None, None) cpu = nc.NesCPU(mem)", "32, 32, 32, 32, 32, 32, 45, 45, 32, 70, 108, 97, 103,", "<filename>ad_fcemu/nes_cpu_test.py # -*- coding: utf-8 -*- import log_differ as ld import nes_cpu as", "v, r def address_for_log_info(addr): if addr is None: return -1 else: return addr", "nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes) cpu = nc.NesCPU(mem) # nestest.nes 所需的特殊初始化 cpu.pc", "32, 32, 32, 32, 45, 45, 32, 82, 117, 110, 32, 97, 108,", "nc.NesCPU(mem) cpu.push(1) expected = 1 result = cpu.pop() assert expected == result, result", "45, 45, 32, 73, 109, 109, 101, 100, 105, 97, 116, 101, 32,", "nft def test_load_nes(): nes = nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes) if len(nes.prg_rom)", "110, 100, 105, 114, 101, 99, 116, 41, 44, 89, 32, 116, 101,", "assert s.value == (i | 0b00100000), s.value def test_status2(): s = nc._Status(0x24) expected", "32, 32, 32, 32, 45, 45, 32, 90, 101, 114, 111, 112, 97,", "108, 117, 116, 101, 44, 88, 32, 116, 101, 115, 116, 115, 32,", "114, 111, 112, 97, 103, 101, 32, 116, 101, 115, 116, 115, 32,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 83,", "return -1 else: return addr def test_by_log_differ(): differ = ld.LogDiffer.from_json('misc/nestest_log.json') nes = nft.prepared_nes()", "0x0100 \"\"\" mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) sp = cpu.sp cpu.push(1)", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "32, 45, 45, 32, 82, 117, 110, 32, 97, 108, 108, 32, 116,", "def test_push_pop2(): mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1) cpu.push(2) cpu.push(3) cpu.push(4)", "32, 32, 32, 32, 32, 32, 32, 83, 116, 97, 114, 116, 58,", "cpu.memory[addr] assert expected == result, result def _test_ppu(): nes = nft.prepared_nes() cpu =", "32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 82, 117,", "111, 114, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32,", "116, 58, 32, 114, 117, 110, 32, 116, 101, 115, 116, 32, 32,", "58, 32, 115, 101, 108, 101, 99, 116, 32, 116, 101, 115, 116,", "47, 68, 
111, 119, 110, 58, 32, 115, 101, 108, 101, 99, 116,", "99, 104, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32,", "32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 70, 108, 97,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 85, 112,", "45, 32, 73, 109, 112, 108, 105, 101, 100, 32, 116, 101, 115,", "111, 108, 117, 116, 101, 44, 88, 32, 116, 101, 115, 116, 115,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 83, 101, 108, 101,", "nc._Status(0x24) expected = dict( carry=0, zero=0, interrupt=1, decimal=0, overflow=0, negative=0, ) for k,", "ld.AllTestsPassed: break cpu._execute(op, addr, mode) def test_push_pop1(): mem = nc.Memory(None, None) cpu =", "32, 32, 32, 32, 83, 101, 108, 101, 99, 116, 58, 32, 73,", "32, 32, 45, 45, 32, 65, 98, 115, 111, 108, 117, 116, 101,", "== 1 assert s.value == (i | 0b00100000), s.value def test_status2(): s =", "32, 32, 32, 32, 45, 45, 32, 66, 114, 97, 110, 99, 104,", "119, 110, 58, 32, 115, 101, 108, 101, 99, 116, 32, 116, 101,", "32, 32, 32, 32, 32, 45, 45, 32, 90, 101, 114, 111, 112,", "carry=0, zero=0, interrupt=1, decimal=0, overflow=0, negative=0, ) for k, v in expected.items(): r", "s + 0x0100 \"\"\" mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) sp =", "32, 32, 32, 32, 32, 32, 45, 45, 32, 90, 101, 114, 111,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 85,", "addr, mode) def test_push_pop1(): mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1) expected", "nes = nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes) if len(nes.prg_rom) == 32 *", "105, 100, 32, 111, 112, 115, 33, 32, 32, 32, 32, 32, 32,", "nes_cpu as nc import nes_file_test as nft def test_load_nes(): nes = nft.prepared_nes() mem", "= getattr(s, k) assert r == v, r def address_for_log_info(addr): if addr is", "nc._Status(0x24) while True: info = cpu.dump_registers() op, addr, mode = cpu._prepare() info['op'] =", "nc import nes_file_test as nft def test_load_nes(): nes = nft.prepared_nes() mem = nc.Memory(None,", "108, 101, 99, 116, 58, 32, 73, 110, 118, 97, 108, 105, 100,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ] result", "1] result = [cpu.pop() for _ in range(4)] assert expected == result, result", "cpu = nc.NesCPU(mem) sp = cpu.sp cpu.push(1) expected = 1 addr = sp", "114, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32,", "32, 32, 32, 32, 45, 45, 32, 40, 73, 110, 100, 105, 114,", "32, 65, 98, 115, 111, 108, 117, 116, 101, 32, 116, 101, 115,", "cpu.status = nc._Status(0x24) while True: info = cpu.dump_registers() op, addr, mode = cpu._prepare()", "= address_for_log_info(addr) try: differ.diff(info) except ld.AllTestsPassed: break cpu._execute(op, addr, mode) def test_push_pop1(): mem", "97, 103, 101, 44, 88, 32, 116, 101, 115, 116, 115, 32, 32,", "111, 119, 110, 58, 32, 115, 101, 108, 101, 99, 116, 32, 116,", "for _ in range(20000): cpu.execute() expected = [ 32, 32, 32, 32, 32,", "# nestest.nes 所需的特殊初始化 cpu.pc = 0xc000 cpu.status = nc._Status(0x24) while True: info =", "32, 85, 112, 47, 68, 111, 119, 110, 58, 32, 115, 101, 108,", "32, 32, 32, 85, 112, 47, 68, 111, 119, 110, 58, 32, 115,", "117, 110, 32, 116, 101, 115, 116, 32, 32, 32, 32, 32, 32,", "32, 32, 32, 32, 32, 32, 32, 83, 101, 108, 101, 99, 116,", "32, 111, 112, 115, 33, 32, 32, 32, 32, 32, 32, 32, 32,", "101, 115, 116, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "expected = 1 addr = sp + 0x0100 result = cpu.memory[addr] assert expected", "== (i | 0b00100000), s.value def test_status2(): s = nc._Status(0x24) expected = dict(", "68, 111, 119, 110, 58, 32, 115, 101, 108, 101, 99, 116, 32,", "nc.NesCPU() cpu.load_nes(nes) 
cpu.interrupt('reset') for _ in range(20000): cpu.execute() expected = [ 32, 32,", "if addr is None: return -1 else: return addr def test_by_log_differ(): differ =", "-*- coding: utf-8 -*- import log_differ as ld import nes_cpu as nc import", "32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 65, 99,", "101, 114, 111, 112, 97, 103, 101, 32, 116, 101, 115, 116, 115,", "32, 32, 32, 32, 32, 45, 45, 32, 73, 109, 112, 108, 105,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 83, 101,", "115, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "result = mem.prg_rom assert expected == result, result def test_status(): for i in", "32, 90, 101, 114, 111, 112, 97, 103, 101, 32, 116, 101, 115,", "116, 97, 99, 107, 32, 116, 101, 115, 116, 115, 32, 32, 32,", "def test_load_nes(): nes = nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes) if len(nes.prg_rom) ==", "= nes.prg_rom else: expected = nes.prg_rom * 2 result = mem.prg_rom assert expected", "103, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32,", "117, 110, 32, 97, 108, 108, 32, 116, 101, 115, 116, 115, 32,", "108, 108, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32,", "32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 90, 101,", "r def address_for_log_info(addr): if addr is None: return -1 else: return addr def", "= [cpu.pop() for _ in range(4)] assert expected == result, result def test_push():", "45, 32, 83, 116, 97, 99, 107, 32, 116, 101, 115, 116, 115,", "32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 70, 108,", "cpu = nc.NesCPU(mem) cpu.push(1) cpu.push(2) cpu.push(3) cpu.push(4) expected = [4, 3, 2, 1]", "116, 111, 114, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32,", "65, 98, 115, 111, 108, 117, 116, 101, 44, 89, 32, 116, 101,", "3, 2, 1] result = [cpu.pop() for _ in range(4)] assert expected ==", "def test_push(): \"\"\" addr = s + 0x0100 \"\"\" mem = nc.Memory(None, None)", "32, 32, 32, 32, 32, 32, 32, 0, 0, 0, 0, 0, 0,", "45, 32, 90, 101, 114, 111, 112, 97, 103, 101, 44, 88, 32,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "32, 45, 45, 32, 83, 116, 97, 99, 107, 32, 116, 101, 115,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 83, 116, 97, 114,", "32, 32, 45, 45, 32, 66, 114, 97, 110, 99, 104, 32, 116,", "* 1024: expected = nes.prg_rom else: expected = nes.prg_rom * 2 result =", "45, 45, 32, 40, 73, 110, 100, 105, 114, 101, 99, 116, 44,", "32, 45, 45, 32, 65, 98, 115, 111, 108, 117, 116, 101, 44,", "32, 32, 32, 32, 32, 85, 112, 47, 68, 111, 119, 110, 58,", "97, 108, 108, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32,", "= cpu._prepare() info['op'] = op info['address'] = address_for_log_info(addr) try: differ.diff(info) except ld.AllTestsPassed: break", "116, 58, 32, 73, 110, 118, 97, 108, 105, 100, 32, 111, 112,", "位始终为 1 assert s._ignore == 1 assert s.value == (i | 0b00100000), s.value", "32, 83, 116, 97, 99, 107, 32, 116, 101, 115, 116, 115, 32,", "116, 44, 88, 41, 32, 116, 101, 115, 116, 115, 32, 32, 32,", "test_push_pop2(): mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1) cpu.push(2) cpu.push(3) cpu.push(4) expected", "101, 99, 116, 44, 88, 41, 32, 116, 101, 115, 116, 115, 32,", "getattr(s, k) assert r == v, r def address_for_log_info(addr): if addr is None:", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]", "2 result = mem.prg_rom assert expected == result, result def test_status(): for i", "cpu = nc.NesCPU(mem) cpu.push(1) expected = 1 result = cpu.pop() assert expected ==", "116, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "cpu = nc.NesCPU() cpu.load_nes(nes) cpu.interrupt('reset') for _ in range(20000): cpu.execute() expected = [", 
"== result, result def test_push(): \"\"\" addr = s + 0x0100 \"\"\" mem", "= nc.Memory(None, None) mem.load_nes(nes) if len(nes.prg_rom) == 32 * 1024: expected = nes.prg_rom", "97, 99, 107, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32,", "s = nc._Status(0x24) expected = dict( carry=0, zero=0, interrupt=1, decimal=0, overflow=0, negative=0, )", "115, 111, 108, 117, 116, 101, 44, 89, 32, 116, 101, 115, 116,", "2, 1] result = [cpu.pop() for _ in range(4)] assert expected == result,", "nc.NesCPU(mem) sp = cpu.sp cpu.push(1) expected = 1 addr = sp + 0x0100", "45, 32, 66, 114, 97, 110, 99, 104, 32, 116, 101, 115, 116,", "test_push_pop1(): mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1) expected = 1 result", "nestest.nes 所需的特殊初始化 cpu.pc = 0xc000 cpu.status = nc._Status(0x24) while True: info = cpu.dump_registers()", "97, 103, 101, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32,", "32, 32, 32, 45, 45, 32, 40, 73, 110, 100, 105, 114, 101,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 83, 116, 97,", "97, 103, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32,", "\"\"\" mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) sp = cpu.sp cpu.push(1) expected", "73, 109, 112, 108, 105, 101, 100, 32, 116, 101, 115, 116, 115,", "= nc.Memory(None, None) mem.load_nes(nes) cpu = nc.NesCPU(mem) # nestest.nes 所需的特殊初始化 cpu.pc = 0xc000", "mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) sp = cpu.sp cpu.push(1) expected =", "32, 45, 45, 32, 73, 109, 112, 108, 105, 101, 100, 32, 116,", "32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 83, 116,", "def address_for_log_info(addr): if addr is None: return -1 else: return addr def test_by_log_differ():", "32, 32, 32, 32, 32, 32, 32, 32, 32, 85, 112, 47, 68,", "nc._Status(i) # 第 5 位始终为 1 assert s._ignore == 1 assert s.value ==", "def test_by_log_differ(): differ = ld.LogDiffer.from_json('misc/nestest_log.json') nes = nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes)", "97, 110, 99, 104, 32, 116, 101, 115, 116, 115, 32, 32, 32,", "True: info = cpu.dump_registers() op, addr, mode = cpu._prepare() info['op'] = op info['address']", "32, 32, 32, 32, 32, 45, 45, 32, 83, 116, 97, 99, 107,", "0b00100000), s.value def test_status2(): s = nc._Status(0x24) expected = dict( carry=0, zero=0, interrupt=1,", "32, 32, 32, 32, 32, 45, 45, 32, 66, 114, 97, 110, 99,", "as ld import nes_cpu as nc import nes_file_test as nft def test_load_nes(): nes", "cpu._execute(op, addr, mode) def test_push_pop1(): mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1)", "32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 65, 99, 99,", "45, 32, 65, 98, 115, 111, 108, 117, 116, 101, 44, 89, 32,", "45, 45, 32, 73, 109, 112, 108, 105, 101, 100, 32, 116, 101,", "112, 47, 68, 111, 119, 110, 58, 32, 115, 101, 108, 101, 99,", "117, 116, 101, 44, 88, 32, 116, 101, 115, 116, 115, 32, 32,", "98, 115, 111, 108, 117, 116, 101, 44, 89, 32, 116, 101, 115,", "k, v in expected.items(): r = getattr(s, k) assert r == v, r", "mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) cpu.push(1) expected = 1 result =", "i in range(255): s = nc._Status(i) # 第 5 位始终为 1 assert s._ignore", "32, 32, 32, 32, 32, 32, 0, 0, 0, 0, 0, 0, 0,", "116, 115, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "expected.items(): r = getattr(s, k) assert r == v, r def address_for_log_info(addr): if", "45, 45, 32, 83, 116, 97, 99, 107, 32, 116, 101, 115, 116,", "[4, 3, 2, 1] result = [cpu.pop() for _ in range(4)] assert expected", "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 83, 116,", "result def test_push_pop2(): mem = nc.Memory(None, None) cpu = 
nc.NesCPU(mem) cpu.push(1) cpu.push(2) cpu.push(3)", "110, 118, 97, 108, 105, 100, 32, 111, 112, 115, 33, 32, 32,", "None) cpu = nc.NesCPU(mem) sp = cpu.sp cpu.push(1) expected = 1 addr =", "32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 73, 109, 109,", "32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 40, 73,", "= dict( carry=0, zero=0, interrupt=1, decimal=0, overflow=0, negative=0, ) for k, v in", "111, 108, 117, 116, 101, 44, 89, 32, 116, 101, 115, 116, 115,", "assert expected == result, result def test_status(): for i in range(255): s =", "117, 108, 97, 116, 111, 114, 32, 116, 101, 115, 116, 115, 32,", "32, 32, 32, 45, 45, 32, 82, 117, 110, 32, 97, 108, 108,", "116, 41, 44, 89, 32, 116, 101, 115, 116, 115, 32, 32, 32,", "32, 116, 101, 115, 116, 32, 32, 32, 32, 32, 32, 32, 32,", "33, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "101, 44, 89, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 73,", "32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 65, 98, 115,", "115, 33, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,", "import nes_cpu as nc import nes_file_test as nft def test_load_nes(): nes = nft.prepared_nes()", "32, 32, 32, 45, 45, 32, 73, 109, 112, 108, 105, 101, 100,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 66,", "70, 108, 97, 103, 32, 116, 101, 115, 116, 115, 32, 32, 32,", "None) cpu = nc.NesCPU(mem) cpu.push(1) expected = 1 result = cpu.pop() assert expected", "else: expected = nes.prg_rom * 2 result = mem.prg_rom assert expected == result,", "115, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45,", "addr = sp + 0x0100 result = cpu.memory[addr] assert expected == result, result", "import nes_file_test as nft def test_load_nes(): nes = nft.prepared_nes() mem = nc.Memory(None, None)", "result = cpu.pop() assert expected == result, result def test_push_pop2(): mem = nc.Memory(None,", "32, 73, 110, 118, 97, 108, 105, 100, 32, 111, 112, 115, 33,", "nes.prg_rom * 2 result = mem.prg_rom assert expected == result, result def test_status():", "addr def test_by_log_differ(): differ = ld.LogDiffer.from_json('misc/nestest_log.json') nes = nft.prepared_nes() mem = nc.Memory(None, None)", "= s + 0x0100 \"\"\" mem = nc.Memory(None, None) cpu = nc.NesCPU(mem) sp", "ld import nes_cpu as nc import nes_file_test as nft def test_load_nes(): nes =", "40, 73, 110, 100, 105, 114, 101, 99, 116, 41, 44, 89, 32,", "1 assert s._ignore == 1 assert s.value == (i | 0b00100000), s.value def", "= nc._Status(0x24) while True: info = cpu.dump_registers() op, addr, mode = cpu._prepare() info['op']", "k) assert r == v, r def address_for_log_info(addr): if addr is None: return", "110, 100, 105, 114, 101, 99, 116, 44, 88, 41, 32, 116, 101,", "99, 116, 41, 44, 89, 32, 116, 101, 115, 116, 115, 32, 32,", "88, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32,", "info = cpu.dump_registers() op, addr, mode = cpu._prepare() info['op'] = op info['address'] =", "= nc.NesCPU(mem) # nestest.nes 所需的特殊初始化 cpu.pc = 0xc000 cpu.status = nc._Status(0x24) while True:", "32, 32, 32, 32, 32, 32, 45, 45, 32, 73, 109, 109, 101,", "45, 32, 65, 98, 115, 111, 108, 117, 116, 101, 44, 88, 32,", "32, 32, 83, 101, 108, 101, 99, 116, 58, 32, 73, 110, 118,", "99, 117, 109, 117, 108, 97, 116, 111, 114, 32, 116, 101, 115,", "nes_file_test as nft def test_load_nes(): nes = nft.prepared_nes() mem = nc.Memory(None, None) mem.load_nes(nes)", "90, 101, 114, 111, 112, 97, 103, 101, 32, 116, 101, 115, 116,", "cpu.push(1) expected = 1 addr = sp + 0x0100 result = 
cpu.memory[addr] assert", "mode = cpu._prepare() info['op'] = op info['address'] = address_for_log_info(addr) try: differ.diff(info) except ld.AllTestsPassed:", "32, 32, 32, 45, 45, 32, 70, 108, 97, 103, 32, 116, 101,", "108, 97, 103, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32,", "expected = 1 result = cpu.pop() assert expected == result, result def test_push_pop2():", "108, 101, 99, 116, 32, 116, 101, 115, 116, 32, 32, 32, 32,", "result = [cpu.pop() for _ in range(4)] assert expected == result, result def", "32, 32, 32, 45, 45, 32, 66, 114, 97, 110, 99, 104, 32,", "32, 32, 32, 32, 32, 32, 45, 45, 32, 73, 109, 112, 108,", "32, 32, 32, 32, 32, 32, 32, 32, 32, 0, 0, 0, 0,", "32, 32, 32, 32, 32, 45, 45, 32, 70, 108, 97, 103, 32,", "nes.prg_rom else: expected = nes.prg_rom * 2 result = mem.prg_rom assert expected ==", "= nc.NesCPU() cpu.load_nes(nes) cpu.interrupt('reset') for _ in range(20000): cpu.execute() expected = [ 32,", "None: return -1 else: return addr def test_by_log_differ(): differ = ld.LogDiffer.from_json('misc/nestest_log.json') nes =", "73, 109, 109, 101, 100, 105, 97, 116, 101, 32, 116, 101, 115,", "* 2 result = mem.prg_rom assert expected == result, result def test_status(): for", "99, 107, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32,", "32, 45, 45, 32, 90, 101, 114, 111, 112, 97, 103, 101, 32,", "97, 108, 105, 100, 32, 111, 112, 115, 33, 32, 32, 32, 32,", "115, 111, 108, 117, 116, 101, 44, 88, 32, 116, 101, 115, 116," ]
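(Aside: the ASCII claim above is easy to check against the row's own numbers; this uses only the standard library, no assumptions:)

codes = [82, 117, 110, 32, 97, 108, 108, 32, 116, 101, 115, 116, 115]
print(bytes(codes).decode("ascii"))   # -> "Run all tests"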
[]   (an empty row: this record carries no shingles)
import math
import os
import random
import re
import sys

a = [6, 4, 1]

# Complete the countSwaps function below.
def countSwaps(a):
    n = len(a)
    swapcount = 0
    for i in range(n):
        for j in range(0, n-i-1):
            if a[j] > a[j+1]:
                swapcount += 1
                a[j], a[j+1] = a[j+1], a[j]
    print(f"Array is sorted in {swapcount} swaps.")
    print(f"First Element: {a[0]}")
    print(f"Last Element: {a[-1]}")

countSwaps(a)
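
# The countSwaps solution above reports the number of bubble-sort swaps,
# which equals the number of inversions in the array: bubble sort performs
# exactly one swap per inversion. A sketch of the same count in O(n log n)
# via merge sort -- count_inversions is a hypothetical helper, not part of
# the original solution:
def count_inversions(a):
    if len(a) <= 1:
        return list(a), 0
    mid = len(a) // 2
    left, inv_left = count_inversions(a[:mid])
    right, inv_right = count_inversions(a[mid:])
    merged, inversions = [], inv_left + inv_right
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            # Every remaining element of `left` is greater than right[j],
            # so each one forms an inversion with it.
            inversions += len(left) - i
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged, inversions

assert count_inversions([6, 4, 1])[1] == 3  # matches 'sorted in 3 swaps'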
from django.contrib.auth.models import User
from django.db import models

PET_SIZES = [('P', 'Pequeno'), ('M', 'Médio'), ('G', 'Grande')]
PET_SEX = [('M', 'Macho'), ('F', 'Fêmea')]

# PET TYPE
GATO = 'Gato'
CACHORRO = 'Cachorro'
PASSARO = 'Pássaro'
ROEDOR = 'Roedor'
OUTRO = 'Outro'

# DEFAULT
DE00 = 'Sem raça definida'
DE01 = 'Outra'

# CAT BREED
CB00 = 'Abssínios'
CB01 = 'Alemão de pelo comprido'
CB02 = 'American Curl'
CB03 = 'American Shorthair'
CB04 = 'American Wirehair'
CB05 = 'Azul Russo'
CB06 = 'Balineses'
CB07 = 'Bengalês'
CB08 = 'Bobtail'
CB09 = 'Bobtail Japonês'
CB10 = 'Bombay'
CB11 = 'British Shorthair'
CB12 = 'Burmês'
CB13 = 'Burmilla'
CB14 = 'Califórinia Spangled'
CB15 = 'Chartreux'
CB16 = 'Cornish Rex'
CB17 = 'Cymric'
CB18 = 'Devon Rex'
CB19 = 'Exóticos'
CB20 = 'Foldex'
CB21 = 'German Rex'
CB22 = 'Habana'
CB23 = 'High Land Fold'
CB24 = 'Himalaios'
CB25 = 'Javaneses'
CB26 = 'Khao Manee'
CB27 = 'Korat'
CB28 = 'Maine Coon'
CB29 = 'Manx'
CB30 = '<NAME>'
CB31 = '<NAME>'
CB32 = 'Ragdoll'
CB33 = '<NAME>'
CB34 = 'Ragamuffin'
CB35 = 'Ragdoll'

# DOG BREED
DB00 = 'Akita'
DB01 = 'Basset hound'
DB02 = 'Beagle'
DB03 = 'Boiadeiro australiano'
DB04 = 'Border collie'
DB05 = 'Boston terrier'
DB06 = 'Boxer'
DB07 = 'Buldogue'
DB08 = 'Bull terrier'
DB09 = 'Chihuahua'
DB10 = 'Chow chow'
DB11 = 'Dálmata'
DB12 = 'Doberman'
DB13 = 'Dogo argentino'
DB14 = 'Dogue alemão'
DB15 = 'Fila brasileiro'
DB16 = 'Golden retriever'
DB17 = 'Husky siberiano'
DB18 = '<NAME>'
DB19 = 'Labrador'
DB20 = 'Lhasa apso'
DB21 = 'Lulu da pomerânia'
DB22 = 'Maltês'
DB23 = 'Pastor alemão'
DB24 = 'Pastor australianoPastor de Shetland'
DB25 = 'Pequinês'
DB26 = 'Pinscher'
DB27 = 'Pit bull'
DB28 = 'Poodle'
DB29 = 'Pug'
DB30 = 'Rottweiler'
DB31 = 'Shar-pei'
DB32 = 'Shiba'
DB33 = 'Shih tzu'
DB34 = 'Weimaraner'
DB35 = 'Yorkshire'

# BIRD BREED
BB00 = 'Agapornis'
BB01 = 'Araponga'
BB02 = 'Arara'
BB03 = 'Azulão'
BB04 = 'Bavete'
BB05 = 'Bicudo'
BB06 = 'Cabloquinho'
BB07 = 'Cacatua'
BB08 = 'Calafete'
BB09 = 'Calopsita'
BB10 = 'Canário'
BB11 = 'Cardeal'
BB12 = 'Coleiro'
BB13 = 'Cordonbleu'
BB14 = 'Coruja'
BB15 = 'Curió'
BB16 = 'Diamante Mandarin'
BB17 = 'Dominó'
BB18 = 'Explêndido'
BB19 = 'Granatina'
BB20 = 'Jandaia'
BB21 = 'Lóris'
BB22 = 'Mainá'
BB23 = 'Modesto'
BB24 = 'Papagaio'
BB25 = 'Pássaro Preto'
BB26 = 'Patativa'
BB27 = 'Perequito Autraliano'
BB28 = 'Pica-pau'
BB29 = 'Pintassilgo'
BB30 = 'Pombo'
BB31 = 'Rolinha'
BB32 = 'Rouxinol'
BB33 = 'S<NAME>'
BB34 = 'Tangará'
BB35 = 'Tico-tico'
BB36 = 'Tucano'

# RODENT BREED
RB00 = 'Camundongo'
RB01 = 'Chinchila'
RB02 = 'Gerbil - Esquilo da MOngólia'
RB03 = 'Hamster Anão Russo'
RB04 = 'Hamster Sírio'
RB05 = 'Mecol - Twister'
RB06 = 'Porquinho da índia'
RB07 = 'Topolino'

TYPE_CHOICES = [(GATO, GATO), (CACHORRO, CACHORRO), (PASSARO, PASSARO), (ROEDOR, ROEDOR), (OUTRO, OUTRO),]

BREED_CHOICES = [
    (DE00, DE00), (DE01, DE01),
    (CB00, CB00), (CB01, CB01), (CB02, CB02), (CB03, CB03), (CB04, CB04), (CB05, CB05),
    (CB06, CB06), (CB07, CB07), (CB08, CB08), (CB09, CB09), (CB10, CB10), (CB11, CB11),
    (CB12, CB12), (CB13, CB13), (CB14, CB14), (CB15, CB15), (CB16, CB16), (CB17, CB17),
    (CB18, CB18), (CB19, CB19), (CB20, CB20), (CB21, CB21), (CB22, CB22), (CB23, CB23),
    (CB24, CB24), (CB25, CB25), (CB26, CB26), (CB27, CB27), (CB28, CB28), (CB29, CB29),
    (CB30, CB30), (CB31, CB31), (CB32, CB32), (CB33, CB33), (CB34, CB34), (CB35, CB35),
    (DB00, DB00), (DB01, DB01), (DB02, DB02), (DB03, DB03), (DB04, DB04), (DB05, DB05),
    (DB06, DB06), (DB07, DB07), (DB08, DB08), (DB09, DB09), (DB10, DB10), (DB11, DB11),
    (DB12, DB12), (DB13, DB13), (DB14, DB14), (DB15, DB15), (DB16, DB16), (DB17, DB17),
    (DB18, DB18), (DB19, DB19), (DB20, DB20), (DB21, DB21), (DB22, DB22), (DB23, DB23),
    (DB24, DB24), (DB25, DB25), (DB26, DB26), (DB27, DB27), (DB28, DB28), (DB29, DB29),
    (DB30, DB30), (DB31, DB31), (DB32, DB32), (DB33, DB33), (DB34, DB34), (DB35, DB35),
    (BB00, BB00), (BB01, BB01), (BB02, BB02), (BB03, BB03), (BB04, BB04), (BB05, BB05),
    (BB06, BB06), (BB07, BB07), (BB08, BB08), (BB09, BB09), (BB10, BB10), (BB11, BB11),
    (BB12, BB12), (BB13, BB13), (BB14, BB14), (BB15, BB15), (BB16, BB16), (BB17, BB17),
    (BB18, BB18), (BB19, BB19), (BB20, BB20), (BB21, BB21), (BB22, BB22), (BB23, BB23),
    (BB24, BB24), (BB25, BB25), (BB26, BB26), (BB27, BB27), (BB28, BB28), (BB29, BB29),
    (BB30, BB30), (BB31, BB31), (BB32, BB32), (BB33, BB33), (BB34, BB34), (BB35, BB35),
    (BB36, BB36),  # BB36 is defined above but was missing from the original choices
    (RB00, RB00), (RB01, RB01), (RB02, RB02), (RB03, RB03), (RB04, RB04), (RB05, RB05),
    (RB06, RB06), (RB07, RB07),
]

def get_cat_breeds():
    catBreeds = [
        DE00, DE01,
        CB00, CB01, CB02, CB03, CB04, CB05, CB06, CB07, CB08, CB09,
        CB10, CB11, CB12, CB13, CB14, CB15, CB16, CB17, CB18, CB19,
        CB20, CB21, CB22, CB23, CB24, CB25, CB26, CB27, CB28, CB29,
        CB30, CB31, CB32, CB33, CB34, CB35,
    ]
    return catBreeds

def get_dog_breeds():
    dogBreeds = [
        DE00, DE01,
        DB00, DB01, DB02, DB03, DB04, DB05, DB06, DB07, DB08, DB09,
        DB10, DB11, DB12, DB13, DB14, DB15, DB16, DB17, DB18, DB19,
        DB20, DB21, DB22, DB23, DB24, DB25, DB26, DB27, DB28, DB29,
        DB30, DB31, DB32, DB33, DB34, DB35,
    ]
    return dogBreeds

def get_bird_breeds():
    birdBreeds = [
        DE00, DE01,
        BB00, BB01, BB02, BB03, BB04, BB05, BB06, BB07, BB08, BB09,
        BB10, BB11, BB12, BB13, BB14, BB15, BB16, BB17, BB18, BB19,
        BB20, BB21, BB22, BB23, BB24, BB25, BB26, BB27, BB28, BB29,
        BB30, BB31, BB32, BB33, BB34, BB35,
        BB36,  # 'Tucano'; defined above but missing from the original list
    ]
    return birdBreeds

def get_rodent_breeds():
    rodentBreeds = [
        DE00, DE01,
        RB00, RB01, RB02, RB03, RB04, RB05, RB06, RB07,
    ]
    return rodentBreeds

def get_other_breeds():
    otherBreed = [DE01,]
    return otherBreed

class Pet(models.Model):
    user = models.ForeignKey(User, default=None, on_delete=models.CASCADE)
    image = models.ImageField(upload_to='pet_image', blank=False, null=False)
    name = models.CharField(max_length=30, blank=False, null=False)
    description = models.CharField(max_length=500, blank=False, null=False)
    age = models.PositiveSmallIntegerField(null=True)
    size = models.CharField(max_length=1, choices=PET_SIZES, blank=False, null=False)
    sex = models.CharField(max_length=1, choices=PET_SEX, blank=False, null=False)
    vaccinated = models.BooleanField(default=False)
    castrated = models.BooleanField(default=False)
    dewormed = models.BooleanField(default=False)
    vulnerable = models.BooleanField(default=False)
    isAdopted = models.BooleanField(default=False)
    pet_type = models.CharField(max_length=50, choices=TYPE_CHOICES)
    breed = models.CharField(max_length=50, choices=BREED_CHOICES)
CB35 = 'Ragdoll' # DOG BREED DB00 = 'Akita' DB01 = 'Basset", "Preto' BB26 = 'Patativa' BB27 = 'Perequito Autraliano' BB28 = 'Pica-pau' BB29 =", "CB05, CB06, CB07, CB08, CB09, CB10, CB11, CB12, CB13, CB14, CB15, CB16, CB17,", "return rodentBreeds def get_other_breeds(): otherBreed = [DE01,] return otherBreed class Pet(models.Model): user =", "= 'Bobtail Japonês' CB10 = 'Bombay' CB11 = 'British Shorthair' CB12 = 'Burmês'", "BB12, BB13, BB14, BB15, BB16, BB17, BB18, BB19, BB20, BB21, BB22, BB23, BB24,", "= 'Bombay' CB11 = 'British Shorthair' CB12 = 'Burmês' CB13 = 'Burmilla' CB14", "RB03), (RB04, RB04), (RB05, RB05), (RB06, RB06), (RB07, RB07), ] def get_cat_breeds(): catBreeds", "= 'Manx' CB30 = '<NAME>' CB31 = '<NAME>' CB32 = 'Ragdoll' CB33 =", "DB16 = 'Golden retriever' DB17 = 'Husky siberiano' DB18 = '<NAME>' DB19 =", "CB10 = 'Bombay' CB11 = 'British Shorthair' CB12 = 'Burmês' CB13 = 'Burmilla'", "(BB00, BB00), (BB01, BB01), (BB02, BB02), (BB03, BB03), (BB04, BB04), (BB05, BB05), (BB06,", "BB19), (BB20, BB20), (BB21, BB21), (BB22, BB22), (BB23, BB23), (BB24, BB24), (BB25, BB25),", "CB10, CB11, CB12, CB13, CB14, CB15, CB16, CB17, CB18, CB19, CB20, CB21, CB22,", "DB04), (DB05, DB05), (DB06, DB06), (DB07, DB07), (DB08, DB08), (DB09, DB09), (DB10, DB10),", "BB06, BB07, BB08, BB09, BB10, BB11, BB12, BB13, BB14, BB15, BB16, BB17, BB18,", "CB34), (CB35, CB35), (DB00, DB00), (DB01, DB01), (DB02, DB02), (DB03, DB03), (DB04, DB04),", "'Bobtail Japonês' CB10 = 'Bombay' CB11 = 'British Shorthair' CB12 = 'Burmês' CB13", "DB18 = '<NAME>' DB19 = 'Labrador' DB20 = 'Lhasa apso' DB21 = 'Lulu", "blank=False, null=False) name = models.CharField(max_length=30, blank=False, null=False) description = models.CharField(max_length=500, blank=False, null=False) age", "'Maltês' DB23 = 'Pastor alemão' DB24 = 'Pastor australianoPastor de Shetland' DB25 =", "DB20 = 'Lhasa apso' DB21 = 'Lulu da pomerânia' DB22 = 'Maltês' DB23", "DB14, DB15, DB16, DB17, DB18, DB19, DB20, DB21, DB22, DB23, DB24, DB25, DB26,", "DB21 = 'Lulu da pomerânia' DB22 = 'Maltês' DB23 = 'Pastor alemão' DB24", "RB05, RB06, RB07, ] return rodentBreeds def get_other_breeds(): otherBreed = [DE01,] return otherBreed", "DB29, DB30, DB31, DB32, DB33, DB34, DB35, ] return dogBreeds def get_bird_breeds(): birdBreeds", "= [ DE00, DE01, BB00, BB01, BB02, BB03, BB04, BB05, BB06, BB07, BB08,", "BREED DB00 = 'Akita' DB01 = 'Basset hound' DB02 = 'Beagle' DB03 =", "BB13, BB14, BB15, BB16, BB17, BB18, BB19, BB20, BB21, BB22, BB23, BB24, BB25,", "(BB04, BB04), (BB05, BB05), (BB06, BB06), (BB07, BB07), (BB08, BB08), (BB09, BB09), (BB10,", "(BB24, BB24), (BB25, BB25), (BB26, BB26), (BB27, BB27), (BB28, BB28), (BB29, BB29), (BB30,", "(BB21, BB21), (BB22, BB22), (BB23, BB23), (BB24, BB24), (BB25, BB25), (BB26, BB26), (BB27,", "DB04, DB05, DB06, DB07, DB08, DB09, DB10, DB11, DB12, DB13, DB14, DB15, DB16,", "(BB22, BB22), (BB23, BB23), (BB24, BB24), (BB25, BB25), (BB26, BB26), (BB27, BB27), (BB28,", "'British Shorthair' CB12 = 'Burmês' CB13 = 'Burmilla' CB14 = 'Califórinia Spangled' CB15", "'Roedor' OUTRO = 'Outro' # DEFAULT DE00 = 'Sem raça definida' DE01 =", "(RB05, RB05), (RB06, RB06), (RB07, RB07), ] def get_cat_breeds(): catBreeds = [ DE00,", "= 'Exóticos' CB20 = 'Foldex' CB21 = 'German Rex' CB22 = 'Habana' CB23", "null=False) description = models.CharField(max_length=500, blank=False, null=False) age = models.PositiveSmallIntegerField(null=True) size = models.CharField(max_length=1, choices=PET_SIZES,", "CB24, CB25, CB26, CB27, CB28, CB29, CB30, CB31, CB32, CB33, 
CB34, CB35, ]", "get_dog_breeds(): dogBreeds = [ DE00, DE01, DB00, DB01, DB02, DB03, DB04, DB05, DB06,", "PASSARO = 'Pássaro' ROEDOR = 'Roedor' OUTRO = 'Outro' # DEFAULT DE00 =", "CB25, CB26, CB27, CB28, CB29, CB30, CB31, CB32, CB33, CB34, CB35, ] return", "CB03 = 'American Shorthair' CB04 = 'American Wirehair' CB05 = 'Azul Russo' CB06", "'Calopsita' BB10 = 'Canário' BB11 = 'Cardeal' BB12 = 'Coleiro' BB13 = 'Cordonbleu'", "choices=PET_SIZES, blank=False, null=False) sex = models.CharField(max_length=1, choices=PET_SEX, blank=False, null=False) vaccinated = models.BooleanField(default=False) castrated", "BB00, BB01, BB02, BB03, BB04, BB05, BB06, BB07, BB08, BB09, BB10, BB11, BB12,", "BB10), (BB11, BB11), (BB12, BB12), (BB13, BB13), (BB14, BB14), (BB15, BB15), (BB16, BB16),", "'Perequito Autraliano' BB28 = 'Pica-pau' BB29 = 'Pintassilgo' BB30 = 'Pombo' BB31 =", "[ (DE00, DE00), (DE01, DE01), (CB00, CB00), (CB01, CB01), (CB02, CB02), (CB03, CB03),", "CB14 = 'Califórinia Spangled' CB15 = 'Chartreux' CB16 = 'Cornish Rex' CB17 =", "BB27), (BB28, BB28), (BB29, BB29), (BB30, BB30), (BB31, BB31), (BB32, BB32), (BB33, BB33),", "'Chow chow' DB11 = 'Dálmata' DB12 = 'Doberman' DB13 = 'Dogo argentino' DB14", "('M', 'Médio'), ('G', 'Grande')] PET_SEX = [('M', 'Macho'), ('F', 'Fêmea')] # PET TYPE", "(CB15, CB15), (CB16, CB16), (CB17, CB17), (CB18, CB18), (CB19, CB19), (CB20, CB20), (CB21,", "'Balineses' CB07 = 'Bengalês' CB08 = 'Bobtail' CB09 = 'Bobtail Japonês' CB10 =", "DB01, DB02, DB03, DB04, DB05, DB06, DB07, DB08, DB09, DB10, DB11, DB12, DB13,", "BB18, BB19, BB20, BB21, BB22, BB23, BB24, BB25, BB26, BB27, BB28, BB29, BB30,", "= 'Fila brasileiro' DB16 = 'Golden retriever' DB17 = 'Husky siberiano' DB18 =", "'Mainá' BB23 = 'Modesto' BB24 = 'Papagaio' BB25 = 'Pássaro Preto' BB26 =", "BB16 = 'Diamante Mandarin' BB17 = 'Dominó' BB18 = 'Explêndido' BB19 = 'Granatina'", "(CB06, CB06), (CB07, CB07), (CB08, CB08), (CB09, CB09), (CB10, CB10), (CB11, CB11), (CB12,", "= models.CharField(max_length=1, choices=PET_SIZES, blank=False, null=False) sex = models.CharField(max_length=1, choices=PET_SEX, blank=False, null=False) vaccinated =", "(OUTRO, OUTRO),] BREED_CHOICES = [ (DE00, DE00), (DE01, DE01), (CB00, CB00), (CB01, CB01),", "BB22), (BB23, BB23), (BB24, BB24), (BB25, BB25), (BB26, BB26), (BB27, BB27), (BB28, BB28),", "BB08), (BB09, BB09), (BB10, BB10), (BB11, BB11), (BB12, BB12), (BB13, BB13), (BB14, BB14),", "CB25 = 'Javaneses' CB26 = 'Khao Manee' CB27 = 'Korat' CB28 = 'Maine", "DB33, DB34, DB35, ] return dogBreeds def get_bird_breeds(): birdBreeds = [ DE00, DE01,", "DB25, DB26, DB27, DB28, DB29, DB30, DB31, DB32, DB33, DB34, DB35, ] return", "(DB09, DB09), (DB10, DB10), (DB11, DB11), (DB12, DB12), (DB13, DB13), (DB14, DB14), (DB15,", "= 'American Shorthair' CB04 = 'American Wirehair' CB05 = 'Azul Russo' CB06 =", "= models.BooleanField(default=False) vulnerable = models.BooleanField(default=False) isAdopted = models.BooleanField(default=False) pet_type = models.CharField(max_length=50, choices=TYPE_CHOICES) breed", "CB16), (CB17, CB17), (CB18, CB18), (CB19, CB19), (CB20, CB20), (CB21, CB21), (CB22, CB22),", "BB02), (BB03, BB03), (BB04, BB04), (BB05, BB05), (BB06, BB06), (BB07, BB07), (BB08, BB08),", "(BB29, BB29), (BB30, BB30), (BB31, BB31), (BB32, BB32), (BB33, BB33), (BB34, BB34), (BB35,", "(CB22, CB22), (CB23, CB23), (CB24, CB24), (CB25, CB25), (CB26, CB26), (CB27, CB27), (CB28,", "DB09, DB10, DB11, DB12, DB13, DB14, DB15, DB16, DB17, DB18, DB19, DB20, DB21,", "BB23), (BB24, BB24), (BB25, BB25), (BB26, 
BB26), (BB27, BB27), (BB28, BB28), (BB29, BB29),", "CB13, CB14, CB15, CB16, CB17, CB18, CB19, CB20, CB21, CB22, CB23, CB24, CB25,", "RB03 = 'Hamster Anão Russo' RB04 = 'Hamster Sírio' RB05 = 'Mecol -", "blank=False, null=False) age = models.PositiveSmallIntegerField(null=True) size = models.CharField(max_length=1, choices=PET_SIZES, blank=False, null=False) sex =", "(BB23, BB23), (BB24, BB24), (BB25, BB25), (BB26, BB26), (BB27, BB27), (BB28, BB28), (BB29,", "'Porquinho da índia' RB07 = 'Topolino' TYPE_CHOICES = [(GATO, GATO), (CACHORRO, CACHORRO), (PASSARO,", "= 'German Rex' CB22 = 'Habana' CB23 = 'High Land Fold' CB24 =", "= 'Golden retriever' DB17 = 'Husky siberiano' DB18 = '<NAME>' DB19 = 'Labrador'", "DB10, DB11, DB12, DB13, DB14, DB15, DB16, DB17, DB18, DB19, DB20, DB21, DB22,", "BB34, BB35, ] return birdBreeds def get_rodent_breeds(): rodentBreeds = [ DE00, DE01, RB00,", "= 'S<NAME>' BB34 = 'Tangará' BB35 = 'Tico-tico' BB36 = 'Tucano' # RODENT", "RB02, RB03, RB04, RB05, RB06, RB07, ] return rodentBreeds def get_other_breeds(): otherBreed =", "= 'Basset hound' DB02 = 'Beagle' DB03 = 'Boiadeiro australiano' DB04 = 'Border", "models.BooleanField(default=False) vulnerable = models.BooleanField(default=False) isAdopted = models.BooleanField(default=False) pet_type = models.CharField(max_length=50, choices=TYPE_CHOICES) breed =", "= '<NAME>' DB19 = 'Labrador' DB20 = 'Lhasa apso' DB21 = 'Lulu da", "(DE01, DE01), (CB00, CB00), (CB01, CB01), (CB02, CB02), (CB03, CB03), (CB04, CB04), (CB05,", "'Bengalês' CB08 = 'Bobtail' CB09 = 'Bobtail Japonês' CB10 = 'Bombay' CB11 =", "return catBreeds def get_dog_breeds(): dogBreeds = [ DE00, DE01, DB00, DB01, DB02, DB03,", "DB35, ] return dogBreeds def get_bird_breeds(): birdBreeds = [ DE00, DE01, BB00, BB01,", "(CB11, CB11), (CB12, CB12), (CB13, CB13), (CB14, CB14), (CB15, CB15), (CB16, CB16), (CB17,", "'Devon Rex' CB19 = 'Exóticos' CB20 = 'Foldex' CB21 = 'German Rex' CB22", "CB23), (CB24, CB24), (CB25, CB25), (CB26, CB26), (CB27, CB27), (CB28, CB28), (CB29, CB29),", "Anão Russo' RB04 = 'Hamster Sírio' RB05 = 'Mecol - Twister' RB06 =", "= '<NAME>' CB31 = '<NAME>' CB32 = 'Ragdoll' CB33 = '<NAME>' CB34 =", "BB01), (BB02, BB02), (BB03, BB03), (BB04, BB04), (BB05, BB05), (BB06, BB06), (BB07, BB07),", "'Grande')] PET_SEX = [('M', 'Macho'), ('F', 'Fêmea')] # PET TYPE GATO = 'Gato'", "retriever' DB17 = 'Husky siberiano' DB18 = '<NAME>' DB19 = 'Labrador' DB20 =", "brasileiro' DB16 = 'Golden retriever' DB17 = 'Husky siberiano' DB18 = '<NAME>' DB19", "= 'Poodle' DB29 = 'Pug' DB30 = 'Rottweiler' DB31 = 'Shar-pei' DB32 =", "(BB17, BB17), (BB18, BB18), (BB19, BB19), (BB20, BB20), (BB21, BB21), (BB22, BB22), (BB23,", "DB23), (DB24, DB24), (DB25, DB25), (DB26, DB26), (DB27, DB27), (DB28, DB28), (DB29, DB29),", "(CB01, CB01), (CB02, CB02), (CB03, CB03), (CB04, CB04), (CB05, CB05), (CB06, CB06), (CB07,", "'Tangará' BB35 = 'Tico-tico' BB36 = 'Tucano' # RODENT BREED RB00 = 'Camundongo'", "BREED_CHOICES = [ (DE00, DE00), (DE01, DE01), (CB00, CB00), (CB01, CB01), (CB02, CB02),", "'Doberman' DB13 = 'Dogo argentino' DB14 = 'Dogue alemão' DB15 = 'Fila brasileiro'", "CB18, CB19, CB20, CB21, CB22, CB23, CB24, CB25, CB26, CB27, CB28, CB29, CB30,", "= 'Labrador' DB20 = 'Lhasa apso' DB21 = 'Lulu da pomerânia' DB22 =", "BB01, BB02, BB03, BB04, BB05, BB06, BB07, BB08, BB09, BB10, BB11, BB12, BB13,", "CB00, CB01, CB02, CB03, CB04, CB05, CB06, CB07, CB08, CB09, CB10, CB11, CB12,", "= 'Himalaios' CB25 = 'Javaneses' CB26 = 'Khao Manee' CB27 = 'Korat' CB28", "(BB27, BB27), (BB28, BB28), 
(BB29, BB29), (BB30, BB30), (BB31, BB31), (BB32, BB32), (BB33,", "DB31 = 'Shar-pei' DB32 = 'Shiba' DB33 = 'Shih tzu' DB34 = 'Weimaraner'", "'Calafete' BB09 = 'Calopsita' BB10 = 'Canário' BB11 = 'Cardeal' BB12 = 'Coleiro'", "(BB08, BB08), (BB09, BB09), (BB10, BB10), (BB11, BB11), (BB12, BB12), (BB13, BB13), (BB14,", "(CB32, CB32), (CB33, CB33), (CB34, CB34), (CB35, CB35), (DB00, DB00), (DB01, DB01), (DB02,", "'Pássaro' ROEDOR = 'Roedor' OUTRO = 'Outro' # DEFAULT DE00 = 'Sem raça", "(CB24, CB24), (CB25, CB25), (CB26, CB26), (CB27, CB27), (CB28, CB28), (CB29, CB29), (CB30,", "CB24), (CB25, CB25), (CB26, CB26), (CB27, CB27), (CB28, CB28), (CB29, CB29), (CB30, CB30),", "'Topolino' TYPE_CHOICES = [(GATO, GATO), (CACHORRO, CACHORRO), (PASSARO, PASSARO), (ROEDOR, ROEDOR), (OUTRO, OUTRO),]", "CB10), (CB11, CB11), (CB12, CB12), (CB13, CB13), (CB14, CB14), (CB15, CB15), (CB16, CB16),", "BB26 = 'Patativa' BB27 = 'Perequito Autraliano' BB28 = 'Pica-pau' BB29 = 'Pintassilgo'", "= 'Maltês' DB23 = 'Pastor alemão' DB24 = 'Pastor australianoPastor de Shetland' DB25", "CB19 = 'Exóticos' CB20 = 'Foldex' CB21 = 'German Rex' CB22 = 'Habana'", "[(GATO, GATO), (CACHORRO, CACHORRO), (PASSARO, PASSARO), (ROEDOR, ROEDOR), (OUTRO, OUTRO),] BREED_CHOICES = [", "import models PET_SIZES = [('P', 'Pequeno'), ('M', 'Médio'), ('G', 'Grande')] PET_SEX = [('M',", "= 'Pastor australianoPastor de Shetland' DB25 = 'Pequinês' DB26 = 'Pinscher' DB27 =", "'Foldex' CB21 = 'German Rex' CB22 = 'Habana' CB23 = 'High Land Fold'", "'Poodle' DB29 = 'Pug' DB30 = 'Rottweiler' DB31 = 'Shar-pei' DB32 = 'Shiba'", "= 'Dominó' BB18 = 'Explêndido' BB19 = 'Granatina' BB20 = 'Jandaia' BB21 =", "BB14), (BB15, BB15), (BB16, BB16), (BB17, BB17), (BB18, BB18), (BB19, BB19), (BB20, BB20),", "'High Land Fold' CB24 = 'Himalaios' CB25 = 'Javaneses' CB26 = 'Khao Manee'", "get_cat_breeds(): catBreeds = [ DE00, DE01, CB00, CB01, CB02, CB03, CB04, CB05, CB06,", "'Bombay' CB11 = 'British Shorthair' CB12 = 'Burmês' CB13 = 'Burmilla' CB14 =", "siberiano' DB18 = '<NAME>' DB19 = 'Labrador' DB20 = 'Lhasa apso' DB21 =", "DB17), (DB18, DB18), (DB19, DB19), (DB20, DB20), (DB21, DB21), (DB22, DB22), (DB23, DB23),", "DB06 = 'Boxer' DB07 = 'Buldogue' DB08 = 'Bull terrier' DB09 = 'Chihuahua'", "(DB13, DB13), (DB14, DB14), (DB15, DB15), (DB16, DB16), (DB17, DB17), (DB18, DB18), (DB19,", "DB00 = 'Akita' DB01 = 'Basset hound' DB02 = 'Beagle' DB03 = 'Boiadeiro", "CB30, CB31, CB32, CB33, CB34, CB35, ] return catBreeds def get_dog_breeds(): dogBreeds =", "DB11 = 'Dálmata' DB12 = 'Doberman' DB13 = 'Dogo argentino' DB14 = 'Dogue", "return otherBreed class Pet(models.Model): user = models.ForeignKey(User, default=None, on_delete=models.CASCADE) image = models.ImageField(upload_to='pet_image', blank=False,", "'American Wirehair' CB05 = 'Azul Russo' CB06 = 'Balineses' CB07 = 'Bengalês' CB08", "= [ (DE00, DE00), (DE01, DE01), (CB00, CB00), (CB01, CB01), (CB02, CB02), (CB03,", "CB21, CB22, CB23, CB24, CB25, CB26, CB27, CB28, CB29, CB30, CB31, CB32, CB33,", "DB21, DB22, DB23, DB24, DB25, DB26, DB27, DB28, DB29, DB30, DB31, DB32, DB33,", "BB27, BB28, BB29, BB30, BB31, BB32, BB33, BB34, BB35, ] return birdBreeds def", "from users.models import User from django.db import models PET_SIZES = [('P', 'Pequeno'), ('M',", "(ROEDOR, ROEDOR), (OUTRO, OUTRO),] BREED_CHOICES = [ (DE00, DE00), (DE01, DE01), (CB00, CB00),", "DB01 = 'Basset hound' DB02 = 'Beagle' DB03 = 'Boiadeiro australiano' DB04 =", "'Manx' CB30 = '<NAME>' CB31 = '<NAME>' CB32 = 'Ragdoll' CB33 = '<NAME>'", "DB03), (DB04, DB04), 
(DB05, DB05), (DB06, DB06), (DB07, DB07), (DB08, DB08), (DB09, DB09),", "'<NAME>' CB34 = 'Ragamuffin' CB35 = 'Ragdoll' # DOG BREED DB00 = 'Akita'", "RB01), (RB02, RB02), (RB03, RB03), (RB04, RB04), (RB05, RB05), (RB06, RB06), (RB07, RB07),", "CB11, CB12, CB13, CB14, CB15, CB16, CB17, CB18, CB19, CB20, CB21, CB22, CB23,", "] return birdBreeds def get_rodent_breeds(): rodentBreeds = [ DE00, DE01, RB00, RB01, RB02,", "terrier' DB09 = 'Chihuahua' DB10 = 'Chow chow' DB11 = 'Dálmata' DB12 =", "return dogBreeds def get_bird_breeds(): birdBreeds = [ DE00, DE01, BB00, BB01, BB02, BB03,", "= 'Weimaraner' DB35 = 'Yorkshire' # BIRD BREED BB00 = 'Agapornis' BB01 =", "BB19, BB20, BB21, BB22, BB23, BB24, BB25, BB26, BB27, BB28, BB29, BB30, BB31,", "= 'Roedor' OUTRO = 'Outro' # DEFAULT DE00 = 'Sem raça definida' DE01", "DB02, DB03, DB04, DB05, DB06, DB07, DB08, DB09, DB10, DB11, DB12, DB13, DB14,", "(CB17, CB17), (CB18, CB18), (CB19, CB19), (CB20, CB20), (CB21, CB21), (CB22, CB22), (CB23,", "CACHORRO), (PASSARO, PASSARO), (ROEDOR, ROEDOR), (OUTRO, OUTRO),] BREED_CHOICES = [ (DE00, DE00), (DE01,", "DB07 = 'Buldogue' DB08 = 'Bull terrier' DB09 = 'Chihuahua' DB10 = 'Chow", "= 'Cabloquinho' BB07 = 'Cacatua' BB08 = 'Calafete' BB09 = 'Calopsita' BB10 =", "(CB19, CB19), (CB20, CB20), (CB21, CB21), (CB22, CB22), (CB23, CB23), (CB24, CB24), (CB25,", "Manee' CB27 = 'Korat' CB28 = 'Maine Coon' CB29 = 'Manx' CB30 =", "description = models.CharField(max_length=500, blank=False, null=False) age = models.PositiveSmallIntegerField(null=True) size = models.CharField(max_length=1, choices=PET_SIZES, blank=False,", "'Fila brasileiro' DB16 = 'Golden retriever' DB17 = 'Husky siberiano' DB18 = '<NAME>'", "CB08 = 'Bobtail' CB09 = 'Bobtail Japonês' CB10 = 'Bombay' CB11 = 'British", "= 'Husky siberiano' DB18 = '<NAME>' DB19 = 'Labrador' DB20 = 'Lhasa apso'", "BB14, BB15, BB16, BB17, BB18, BB19, BB20, BB21, BB22, BB23, BB24, BB25, BB26,", "DE01, DB00, DB01, DB02, DB03, DB04, DB05, DB06, DB07, DB08, DB09, DB10, DB11,", "BB35 = 'Tico-tico' BB36 = 'Tucano' # RODENT BREED RB00 = 'Camundongo' RB01", "(CB30, CB30), (CB31, CB31), (CB32, CB32), (CB33, CB33), (CB34, CB34), (CB35, CB35), (DB00,", "'Buldogue' DB08 = 'Bull terrier' DB09 = 'Chihuahua' DB10 = 'Chow chow' DB11", "models PET_SIZES = [('P', 'Pequeno'), ('M', 'Médio'), ('G', 'Grande')] PET_SEX = [('M', 'Macho'),", "= 'Diamante Mandarin' BB17 = 'Dominó' BB18 = 'Explêndido' BB19 = 'Granatina' BB20", "'Tucano' # RODENT BREED RB00 = 'Camundongo' RB01 = 'Chinchila' RB02 = 'Gerbil", "CB01), (CB02, CB02), (CB03, CB03), (CB04, CB04), (CB05, CB05), (CB06, CB06), (CB07, CB07),", "(BB12, BB12), (BB13, BB13), (BB14, BB14), (BB15, BB15), (BB16, BB16), (BB17, BB17), (BB18,", "CB26, CB27, CB28, CB29, CB30, CB31, CB32, CB33, CB34, CB35, ] return catBreeds", "DB30 = 'Rottweiler' DB31 = 'Shar-pei' DB32 = 'Shiba' DB33 = 'Shih tzu'", "DE00), (DE01, DE01), (CB00, CB00), (CB01, CB01), (CB02, CB02), (CB03, CB03), (CB04, CB04),", "CB04 = 'American Wirehair' CB05 = 'Azul Russo' CB06 = 'Balineses' CB07 =", "'Pastor australianoPastor de Shetland' DB25 = 'Pequinês' DB26 = 'Pinscher' DB27 = 'Pit", "DB35 = 'Yorkshire' # BIRD BREED BB00 = 'Agapornis' BB01 = 'Araponga' BB02", "DB00), (DB01, DB01), (DB02, DB02), (DB03, DB03), (DB04, DB04), (DB05, DB05), (DB06, DB06),", "DE01 = 'Outra' # CAT BREED CB00 = 'Abssínios' CB01 = 'Alemão de", "'Agapornis' BB01 = 'Araponga' BB02 = 'Arara' BB03 = 'Azulão' BB04 = 'Bavete'", "BB32, BB33, BB34, BB35, ] return birdBreeds def get_rodent_breeds(): rodentBreeds = [ DE00,", "(DB34, 
DB34), (DB35, DB35), (BB00, BB00), (BB01, BB01), (BB02, BB02), (BB03, BB03), (BB04,", "= 'Granatina' BB20 = 'Jandaia' BB21 = 'Lóris' BB22 = 'Mainá' BB23 =", "(CB14, CB14), (CB15, CB15), (CB16, CB16), (CB17, CB17), (CB18, CB18), (CB19, CB19), (CB20,", "BB21), (BB22, BB22), (BB23, BB23), (BB24, BB24), (BB25, BB25), (BB26, BB26), (BB27, BB27),", "BB29), (BB30, BB30), (BB31, BB31), (BB32, BB32), (BB33, BB33), (BB34, BB34), (BB35, BB35),", "CB29, CB30, CB31, CB32, CB33, CB34, CB35, ] return catBreeds def get_dog_breeds(): dogBreeds", "(CB10, CB10), (CB11, CB11), (CB12, CB12), (CB13, CB13), (CB14, CB14), (CB15, CB15), (CB16,", "= 'Rottweiler' DB31 = 'Shar-pei' DB32 = 'Shiba' DB33 = 'Shih tzu' DB34", "(DB06, DB06), (DB07, DB07), (DB08, DB08), (DB09, DB09), (DB10, DB10), (DB11, DB11), (DB12,", "CB00 = 'Abssínios' CB01 = 'Alemão de pelo comprido' CB02 = 'American Curl'", "= models.BooleanField(default=False) castrated = models.BooleanField(default=False) dewormed = models.BooleanField(default=False) vulnerable = models.BooleanField(default=False) isAdopted =", "DE00, DE01, BB00, BB01, BB02, BB03, BB04, BB05, BB06, BB07, BB08, BB09, BB10,", "BB33 = 'S<NAME>' BB34 = 'Tangará' BB35 = 'Tico-tico' BB36 = 'Tucano' #", "BB18 = 'Explêndido' BB19 = 'Granatina' BB20 = 'Jandaia' BB21 = 'Lóris' BB22", "'Dogo argentino' DB14 = 'Dogue alemão' DB15 = 'Fila brasileiro' DB16 = 'Golden", "users.models import User from django.db import models PET_SIZES = [('P', 'Pequeno'), ('M', 'Médio'),", "BB05 = 'Bicudo' BB06 = 'Cabloquinho' BB07 = 'Cacatua' BB08 = 'Calafete' BB09", "(DB16, DB16), (DB17, DB17), (DB18, DB18), (DB19, DB19), (DB20, DB20), (DB21, DB21), (DB22,", "'Patativa' BB27 = 'Perequito Autraliano' BB28 = 'Pica-pau' BB29 = 'Pintassilgo' BB30 =", "DB34, DB35, ] return dogBreeds def get_bird_breeds(): birdBreeds = [ DE00, DE01, BB00,", "= 'Coruja' BB15 = 'Curió' BB16 = 'Diamante Mandarin' BB17 = 'Dominó' BB18", "= models.ImageField(upload_to='pet_image', blank=False, null=False) name = models.CharField(max_length=30, blank=False, null=False) description = models.CharField(max_length=500, blank=False,", "(BB03, BB03), (BB04, BB04), (BB05, BB05), (BB06, BB06), (BB07, BB07), (BB08, BB08), (BB09,", "= 'Habana' CB23 = 'High Land Fold' CB24 = 'Himalaios' CB25 = 'Javaneses'" ]
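# --- Editor's sketch (not part of the original models.py) ---
# The four get_*_breeds() helpers above each return the breed list for one
# species. A form or view that already knows the chosen pet type could narrow
# BREED_CHOICES down to the matching species like this; get_breeds_for_type
# is a hypothetical name introduced here for illustration.
def get_breeds_for_type(pet_type):
    breeds_by_type = {
        GATO: get_cat_breeds,
        CACHORRO: get_dog_breeds,
        PASSARO: get_bird_breeds,
        ROEDOR: get_rodent_breeds,
        OUTRO: get_other_breeds,
    }
    # The helpers return plain strings; wrap them as (value, label) pairs so
    # they can be used directly as Django form/field choices.
    return [(breed, breed) for breed in breeds_by_type[pet_type]()]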
[ "int(claim.split(' ')[3].split('x')[0]) b = int(claim.split(' ')[3].split('x')[1]) for i in range(x, x + a):", "int(claim.split(' ')[2].split(',')[1][:-1]) a = int(claim.split(' ')[3].split('x')[0]) b = int(claim.split(' ')[3].split('x')[1]) for i in", "in range(y, y + b): if canvas[i][j] > 1: return False # If", "False # If we get here the claim is intact! return True for", "in range(1000)] for y in range(1000)] for claim in lines: x = int(claim.split('", "x + a): for j in range(y, y + b): canvas[i][j] += 1", "get here the claim is intact! return True for claim in lines: if", "i in range(x, x + a): for j in range(y, y + b):", "y in range(1000)] for claim in lines: x = int(claim.split(' ')[2].split(',')[0]) y =", "checkIntact(canvas, claim): x = int(claim.split(' ')[2].split(',')[0]) y = int(claim.split(' ')[2].split(',')[1][:-1]) a = int(claim.split('", "lines: x = int(claim.split(' ')[2].split(',')[0]) y = int(claim.split(' ')[2].split(',')[1][:-1]) a = int(claim.split(' ')[3].split('x')[0])", "= int(claim.split(' ')[3].split('x')[1]) for i in range(x, x + a): for j in", "for j in range(y, y + b): canvas[i][j] += 1 def checkIntact(canvas, claim):", "intact! return True for claim in lines: if checkIntact(canvas, claim): print(claim.split(' ')[0][1:]) break", "y + b): if canvas[i][j] > 1: return False # If we get", "for claim in lines: x = int(claim.split(' ')[2].split(',')[0]) y = int(claim.split(' ')[2].split(',')[1][:-1]) a", "x in range(1000)] for y in range(1000)] for claim in lines: x =", "def checkIntact(canvas, claim): x = int(claim.split(' ')[2].split(',')[0]) y = int(claim.split(' ')[2].split(',')[1][:-1]) a =", "b = int(claim.split(' ')[3].split('x')[1]) for i in range(x, x + a): for j", "#!/usr/bin/env python3.7 import sys lines = [claim.rstrip('\\n') for claim in open(sys.argv[1])] canvas =", "claim is intact! return True for claim in lines: if checkIntact(canvas, claim): print(claim.split('", "# If we get here the claim is intact! return True for claim", "j in range(y, y + b): canvas[i][j] += 1 def checkIntact(canvas, claim): x", "j in range(y, y + b): if canvas[i][j] > 1: return False #", "range(y, y + b): if canvas[i][j] > 1: return False # If we", "claim in lines: x = int(claim.split(' ')[2].split(',')[0]) y = int(claim.split(' ')[2].split(',')[1][:-1]) a =", "we get here the claim is intact! 
return True for claim in lines:", "import sys lines = [claim.rstrip('\\n') for claim in open(sys.argv[1])] canvas = [[0 for", "sys lines = [claim.rstrip('\\n') for claim in open(sys.argv[1])] canvas = [[0 for x", "1 def checkIntact(canvas, claim): x = int(claim.split(' ')[2].split(',')[0]) y = int(claim.split(' ')[2].split(',')[1][:-1]) a", "open(sys.argv[1])] canvas = [[0 for x in range(1000)] for y in range(1000)] for", "[claim.rstrip('\\n') for claim in open(sys.argv[1])] canvas = [[0 for x in range(1000)] for", "in open(sys.argv[1])] canvas = [[0 for x in range(1000)] for y in range(1000)]", "if canvas[i][j] > 1: return False # If we get here the claim", "claim in open(sys.argv[1])] canvas = [[0 for x in range(1000)] for y in", "for y in range(1000)] for claim in lines: x = int(claim.split(' ')[2].split(',')[0]) y", "canvas = [[0 for x in range(1000)] for y in range(1000)] for claim", "= int(claim.split(' ')[2].split(',')[0]) y = int(claim.split(' ')[2].split(',')[1][:-1]) a = int(claim.split(' ')[3].split('x')[0]) b =", "y = int(claim.split(' ')[2].split(',')[1][:-1]) a = int(claim.split(' ')[3].split('x')[0]) b = int(claim.split(' ')[3].split('x')[1]) for", "in range(x, x + a): for j in range(y, y + b): if", "int(claim.split(' ')[3].split('x')[1]) for i in range(x, x + a): for j in range(y,", "b): canvas[i][j] += 1 def checkIntact(canvas, claim): x = int(claim.split(' ')[2].split(',')[0]) y =", "1: return False # If we get here the claim is intact! return", "a = int(claim.split(' ')[3].split('x')[0]) b = int(claim.split(' ')[3].split('x')[1]) for i in range(x, x", "in range(x, x + a): for j in range(y, y + b): canvas[i][j]", "canvas[i][j] > 1: return False # If we get here the claim is", "+ a): for j in range(y, y + b): if canvas[i][j] > 1:", "x = int(claim.split(' ')[2].split(',')[0]) y = int(claim.split(' ')[2].split(',')[1][:-1]) a = int(claim.split(' ')[3].split('x')[0]) b", "= int(claim.split(' ')[3].split('x')[0]) b = int(claim.split(' ')[3].split('x')[1]) for i in range(x, x +", "')[2].split(',')[0]) y = int(claim.split(' ')[2].split(',')[1][:-1]) a = int(claim.split(' ')[3].split('x')[0]) b = int(claim.split(' ')[3].split('x')[1])", "in range(1000)] for claim in lines: x = int(claim.split(' ')[2].split(',')[0]) y = int(claim.split('", "a): for j in range(y, y + b): if canvas[i][j] > 1: return", "')[2].split(',')[1][:-1]) a = int(claim.split(' ')[3].split('x')[0]) b = int(claim.split(' ')[3].split('x')[1]) for i in range(x,", "for i in range(x, x + a): for j in range(y, y +", "range(y, y + b): canvas[i][j] += 1 def checkIntact(canvas, claim): x = int(claim.split('", "is intact! return True for claim in lines: if checkIntact(canvas, claim): print(claim.split(' ')[0][1:])", "x + a): for j in range(y, y + b): if canvas[i][j] >", "range(1000)] for claim in lines: x = int(claim.split(' ')[2].split(',')[0]) y = int(claim.split(' ')[2].split(',')[1][:-1])", "+ b): if canvas[i][j] > 1: return False # If we get here", "int(claim.split(' ')[2].split(',')[0]) y = int(claim.split(' ')[2].split(',')[1][:-1]) a = int(claim.split(' ')[3].split('x')[0]) b = int(claim.split('", "return False # If we get here the claim is intact! 
return True", "')[3].split('x')[1]) for i in range(x, x + a): for j in range(y, y", "range(1000)] for y in range(1000)] for claim in lines: x = int(claim.split(' ')[2].split(',')[0])", "b): if canvas[i][j] > 1: return False # If we get here the", "= [claim.rstrip('\\n') for claim in open(sys.argv[1])] canvas = [[0 for x in range(1000)]", "= [[0 for x in range(1000)] for y in range(1000)] for claim in", "+ a): for j in range(y, y + b): canvas[i][j] += 1 def", "a): for j in range(y, y + b): canvas[i][j] += 1 def checkIntact(canvas,", "If we get here the claim is intact! return True for claim in", "range(x, x + a): for j in range(y, y + b): if canvas[i][j]", "for claim in open(sys.argv[1])] canvas = [[0 for x in range(1000)] for y", "lines = [claim.rstrip('\\n') for claim in open(sys.argv[1])] canvas = [[0 for x in", "the claim is intact! return True for claim in lines: if checkIntact(canvas, claim):", "in range(y, y + b): canvas[i][j] += 1 def checkIntact(canvas, claim): x =", "for j in range(y, y + b): if canvas[i][j] > 1: return False", "= int(claim.split(' ')[2].split(',')[1][:-1]) a = int(claim.split(' ')[3].split('x')[0]) b = int(claim.split(' ')[3].split('x')[1]) for i", "canvas[i][j] += 1 def checkIntact(canvas, claim): x = int(claim.split(' ')[2].split(',')[0]) y = int(claim.split('", "here the claim is intact! return True for claim in lines: if checkIntact(canvas,", "[[0 for x in range(1000)] for y in range(1000)] for claim in lines:", "y + b): canvas[i][j] += 1 def checkIntact(canvas, claim): x = int(claim.split(' ')[2].split(',')[0])", "+ b): canvas[i][j] += 1 def checkIntact(canvas, claim): x = int(claim.split(' ')[2].split(',')[0]) y", "for x in range(1000)] for y in range(1000)] for claim in lines: x", "range(x, x + a): for j in range(y, y + b): canvas[i][j] +=", "in lines: x = int(claim.split(' ')[2].split(',')[0]) y = int(claim.split(' ')[2].split(',')[1][:-1]) a = int(claim.split('", "> 1: return False # If we get here the claim is intact!", "')[3].split('x')[0]) b = int(claim.split(' ')[3].split('x')[1]) for i in range(x, x + a): for", "claim): x = int(claim.split(' ')[2].split(',')[0]) y = int(claim.split(' ')[2].split(',')[1][:-1]) a = int(claim.split(' ')[3].split('x')[0])", "<filename>2018/03/intact.py #!/usr/bin/env python3.7 import sys lines = [claim.rstrip('\\n') for claim in open(sys.argv[1])] canvas", "+= 1 def checkIntact(canvas, claim): x = int(claim.split(' ')[2].split(',')[0]) y = int(claim.split(' ')[2].split(',')[1][:-1])", "python3.7 import sys lines = [claim.rstrip('\\n') for claim in open(sys.argv[1])] canvas = [[0" ]
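# --- Editor's sketch (not part of the original intact.py) ---
# The chained split() calls above re-derive x, y, width and height from a
# claim string such as '#123 @ 3,2: 5x4'. A regular expression expresses the
# same parsing in one place; CLAIM_RE and parse_claim are hypothetical names.
import re

CLAIM_RE = re.compile(r'#(\d+) @ (\d+),(\d+): (\d+)x(\d+)')

def parse_claim(claim):
    # Returns (claim_id, x, y, width, height) as ints.
    return tuple(int(g) for g in CLAIM_RE.match(claim).groups())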
[ "= ( (_('Navigation options'), { 'classes': ('collapse',), 'fields': ( 'grid_num_rows', 'theme_navigation_type', 'theme_bullets_margin_top', 'theme_bullets_color',", "'theme_navigation_type', 'theme_bullets_margin_top', 'theme_bullets_color', 'bullets_space_between', 'theme_arrows_margin_top', 'theme_space_between_arrows', 'theme_navigation_align', 'theme_navigation_offset_hor', ) }), ) class CarouselNavigationUniteOptionsAdmin(admin.ModelAdmin):", "django.contrib import admin from django.utils.translation import ugettext_lazy as _ class TilesGridNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Tiles", "CarouselNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Carousel ''' fieldsets = ( (_('Navigation options'), { 'classes': ('collapse',), 'fields':", "'classes': ('collapse',), 'fields': ( 'theme_enable_navigation', 'theme_navigation_position', 'theme_navigation_enable_play', 'theme_navigation_align', 'theme_navigation_offset_hor', 'theme_navigation_margin', 'theme_space_between_arrows', ) }),", "from django.utils.translation import ugettext_lazy as _ class TilesGridNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Tiles - Grid '''", "}), ) class CarouselNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Carousel ''' fieldsets = ( (_('Navigation options'), {", ") class CarouselNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Carousel ''' fieldsets = ( (_('Navigation options'), { 'classes':", "''' fieldsets = ( (_('Navigation options'), { 'classes': ('collapse',), 'fields': ( 'theme_enable_navigation', 'theme_navigation_position',", "Grid ''' fieldsets = ( (_('Navigation options'), { 'classes': ('collapse',), 'fields': ( 'grid_num_rows',", "_ class TilesGridNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Tiles - Grid ''' fieldsets = ( (_('Navigation options'),", "( (_('Navigation options'), { 'classes': ('collapse',), 'fields': ( 'theme_enable_navigation', 'theme_navigation_position', 'theme_navigation_enable_play', 'theme_navigation_align', 'theme_navigation_offset_hor',", "( (_('Navigation options'), { 'classes': ('collapse',), 'fields': ( 'grid_num_rows', 'theme_navigation_type', 'theme_bullets_margin_top', 'theme_bullets_color', 'bullets_space_between',", "('collapse',), 'fields': ( 'grid_num_rows', 'theme_navigation_type', 'theme_bullets_margin_top', 'theme_bullets_color', 'bullets_space_between', 'theme_arrows_margin_top', 'theme_space_between_arrows', 'theme_navigation_align', 'theme_navigation_offset_hor', )", "fieldsets = ( (_('Navigation options'), { 'classes': ('collapse',), 'fields': ( 'theme_enable_navigation', 'theme_navigation_position', 'theme_navigation_enable_play',", "(_('Navigation options'), { 'classes': ('collapse',), 'fields': ( 'theme_enable_navigation', 'theme_navigation_position', 'theme_navigation_enable_play', 'theme_navigation_align', 'theme_navigation_offset_hor', 'theme_navigation_margin',", "class CarouselNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Carousel ''' fieldsets = ( (_('Navigation options'), { 'classes': ('collapse',),", "'theme_arrows_margin_top', 'theme_space_between_arrows', 'theme_navigation_align', 'theme_navigation_offset_hor', ) }), ) class CarouselNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Carousel ''' fieldsets", "import ugettext_lazy as _ class TilesGridNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Tiles - Grid ''' fieldsets =", "'theme_bullets_margin_top', 'theme_bullets_color', 'bullets_space_between', 'theme_arrows_margin_top', 'theme_space_between_arrows', 'theme_navigation_align', 
'theme_navigation_offset_hor', ) }), ) class CarouselNavigationUniteOptionsAdmin(admin.ModelAdmin): '''", "'theme_navigation_align', 'theme_navigation_offset_hor', ) }), ) class CarouselNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Carousel ''' fieldsets = (", "class TilesGridNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Tiles - Grid ''' fieldsets = ( (_('Navigation options'), {", "''' Tiles - Grid ''' fieldsets = ( (_('Navigation options'), { 'classes': ('collapse',),", "'classes': ('collapse',), 'fields': ( 'grid_num_rows', 'theme_navigation_type', 'theme_bullets_margin_top', 'theme_bullets_color', 'bullets_space_between', 'theme_arrows_margin_top', 'theme_space_between_arrows', 'theme_navigation_align', 'theme_navigation_offset_hor',", ") }), ) class CarouselNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Carousel ''' fieldsets = ( (_('Navigation options'),", "''' Carousel ''' fieldsets = ( (_('Navigation options'), { 'classes': ('collapse',), 'fields': (", "django.utils.translation import ugettext_lazy as _ class TilesGridNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Tiles - Grid ''' fieldsets", "{ 'classes': ('collapse',), 'fields': ( 'grid_num_rows', 'theme_navigation_type', 'theme_bullets_margin_top', 'theme_bullets_color', 'bullets_space_between', 'theme_arrows_margin_top', 'theme_space_between_arrows', 'theme_navigation_align',", "Carousel ''' fieldsets = ( (_('Navigation options'), { 'classes': ('collapse',), 'fields': ( 'theme_enable_navigation',", "( 'grid_num_rows', 'theme_navigation_type', 'theme_bullets_margin_top', 'theme_bullets_color', 'bullets_space_between', 'theme_arrows_margin_top', 'theme_space_between_arrows', 'theme_navigation_align', 'theme_navigation_offset_hor', ) }), )", "'bullets_space_between', 'theme_arrows_margin_top', 'theme_space_between_arrows', 'theme_navigation_align', 'theme_navigation_offset_hor', ) }), ) class CarouselNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Carousel '''", "'grid_num_rows', 'theme_navigation_type', 'theme_bullets_margin_top', 'theme_bullets_color', 'bullets_space_between', 'theme_arrows_margin_top', 'theme_space_between_arrows', 'theme_navigation_align', 'theme_navigation_offset_hor', ) }), ) class", "TilesGridNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Tiles - Grid ''' fieldsets = ( (_('Navigation options'), { 'classes':", "fieldsets = ( (_('Navigation options'), { 'classes': ('collapse',), 'fields': ( 'grid_num_rows', 'theme_navigation_type', 'theme_bullets_margin_top',", "import admin from django.utils.translation import ugettext_lazy as _ class TilesGridNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Tiles -", "from django.contrib import admin from django.utils.translation import ugettext_lazy as _ class TilesGridNavigationUniteOptionsAdmin(admin.ModelAdmin): '''", "'theme_bullets_color', 'bullets_space_between', 'theme_arrows_margin_top', 'theme_space_between_arrows', 'theme_navigation_align', 'theme_navigation_offset_hor', ) }), ) class CarouselNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Carousel", "'theme_navigation_offset_hor', ) }), ) class CarouselNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Carousel ''' fieldsets = ( (_('Navigation", "as _ class TilesGridNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Tiles - Grid ''' fieldsets = ( (_('Navigation", "''' fieldsets = ( (_('Navigation options'), { 'classes': ('collapse',), 'fields': ( 'grid_num_rows', 'theme_navigation_type',", "{ 'classes': ('collapse',), 'fields': ( 'theme_enable_navigation', 
'theme_navigation_position', 'theme_navigation_enable_play', 'theme_navigation_align', 'theme_navigation_offset_hor', 'theme_navigation_margin', 'theme_space_between_arrows', )", "('collapse',), 'fields': ( 'theme_enable_navigation', 'theme_navigation_position', 'theme_navigation_enable_play', 'theme_navigation_align', 'theme_navigation_offset_hor', 'theme_navigation_margin', 'theme_space_between_arrows', ) }), )", "'theme_space_between_arrows', 'theme_navigation_align', 'theme_navigation_offset_hor', ) }), ) class CarouselNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Carousel ''' fieldsets =", "'fields': ( 'grid_num_rows', 'theme_navigation_type', 'theme_bullets_margin_top', 'theme_bullets_color', 'bullets_space_between', 'theme_arrows_margin_top', 'theme_space_between_arrows', 'theme_navigation_align', 'theme_navigation_offset_hor', ) }),", "options'), { 'classes': ('collapse',), 'fields': ( 'theme_enable_navigation', 'theme_navigation_position', 'theme_navigation_enable_play', 'theme_navigation_align', 'theme_navigation_offset_hor', 'theme_navigation_margin', 'theme_space_between_arrows',", "= ( (_('Navigation options'), { 'classes': ('collapse',), 'fields': ( 'theme_enable_navigation', 'theme_navigation_position', 'theme_navigation_enable_play', 'theme_navigation_align',", "(_('Navigation options'), { 'classes': ('collapse',), 'fields': ( 'grid_num_rows', 'theme_navigation_type', 'theme_bullets_margin_top', 'theme_bullets_color', 'bullets_space_between', 'theme_arrows_margin_top',", "- Grid ''' fieldsets = ( (_('Navigation options'), { 'classes': ('collapse',), 'fields': (", "<reponame>ripiu/djangocms_aoxomoxoa from django.contrib import admin from django.utils.translation import ugettext_lazy as _ class TilesGridNavigationUniteOptionsAdmin(admin.ModelAdmin):", "options'), { 'classes': ('collapse',), 'fields': ( 'grid_num_rows', 'theme_navigation_type', 'theme_bullets_margin_top', 'theme_bullets_color', 'bullets_space_between', 'theme_arrows_margin_top', 'theme_space_between_arrows',", "admin from django.utils.translation import ugettext_lazy as _ class TilesGridNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Tiles - Grid", "Tiles - Grid ''' fieldsets = ( (_('Navigation options'), { 'classes': ('collapse',), 'fields':", "ugettext_lazy as _ class TilesGridNavigationUniteOptionsAdmin(admin.ModelAdmin): ''' Tiles - Grid ''' fieldsets = (" ]
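# --- Editor's note (not part of the original admin.py) ---
# fieldsets with 'classes': ('collapse',) renders each option group as a
# collapsible section in the Django admin change form. Registration would
# normally follow; the model names below are assumptions, not visible in the
# recovered fragment, so the sketch is left commented out.
#
# from .models import TilesGridNavigationUniteOptions, CarouselNavigationUniteOptions
# admin.site.register(TilesGridNavigationUniteOptions, TilesGridNavigationUniteOptionsAdmin)
# admin.site.register(CarouselNavigationUniteOptions, CarouselNavigationUniteOptionsAdmin)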
[ "# All rights are reserved to the authors ... I only used a", "if neighbor == node: inc[com] = inc.get(com, 0.) + float(edge_weight) else: inc[com] =", "which is decomposed weight : str, optional the key in graph to use", "= inc.get(com, 0.) + float(edge_weight) / 2. res = 0. for com in", "graph nodes ValueError If the graph has no link TypeError If graph is", "+= inc.get(com, 0.) - \\ ((deg.get(com, 0.) ** 2) / (4. * links))", "KeyError If the partition is not a partition of all graph nodes ValueError", "\"\"\" inc = dict([]) deg = dict([]) links = graph.size(weight=weight) if links ==", "graph has no link TypeError If graph is not a networkx.Graph References ----------", "node in graph: com = partition[node] deg[com] = deg.get(com, 0.) + graph.degree(node, weight=weight)", "str, optional the key in graph to use as weight. Default to 'weight'", "networkx.Graph References ---------- .. 1. Newman, M.E.J. & <NAME>. Finding and evaluating community", "graph[node].items(): edge_weight = datas.get(weight, 1) if partition[neighbor] == com: if neighbor == node:", "<NAME>. Finding and evaluating community structure in networks. Physical Review E 69, 26113(2004).", "partition of all graph nodes ValueError If the graph has no link TypeError", "code :) __author__ = \"\"\"<NAME> (<EMAIL>)\"\"\" def modularity(partition, graph, weight='weight'): \"\"\"Compute the modularity", "communities graph : networkx.Graph the networkx graph which is decomposed weight : str,", "and evaluating community structure in networks. Physical Review E 69, 26113(2004). Examples --------", "modularity Raises ------ KeyError If the partition is not a partition of all", "a partition of a graph Parameters ---------- partition : dict the partition of", "for com in set(partition.values()): res += inc.get(com, 0.) - \\ ((deg.get(com, 0.) **", "the authors ... I only used a span of his code :) __author__", "are their nodes and values the communities graph : networkx.Graph the networkx graph", "= dict([]) links = graph.size(weight=weight) if links == 0: raise ValueError(\"A graph without", "2. res = 0. for com in set(partition.values()): res += inc.get(com, 0.) -", "in set(partition.values()): res += inc.get(com, 0.) - \\ ((deg.get(com, 0.) ** 2) /", "com: if neighbor == node: inc[com] = inc.get(com, 0.) + float(edge_weight) else: inc[com]", "------- modularity : float The modularity Raises ------ KeyError If the partition is", "of a graph Parameters ---------- partition : dict the partition of the nodes,", "26113(2004). Examples -------- >>> G=nx.erdos_renyi_graph(100, 0.01) >>> part = best_partition(G) >>> modularity(part, G)", "modularity\") for node in graph: com = partition[node] deg[com] = deg.get(com, 0.) +", "= inc.get(com, 0.) + float(edge_weight) else: inc[com] = inc.get(com, 0.) + float(edge_weight) /", "partition[node] deg[com] = deg.get(com, 0.) + graph.degree(node, weight=weight) for neighbor, datas in graph[node].items():", "all graph nodes ValueError If the graph has no link TypeError If graph", "a networkx.Graph References ---------- .. 1. Newman, M.E.J. & <NAME>. Finding and evaluating", "1. Newman, M.E.J. & <NAME>. Finding and evaluating community structure in networks. 
Physical", "if type(graph) != nx.Graph: raise TypeError(\"Bad graph type, use only non directed graph\")", "the partition of the nodes, i.e a dictionary where keys are their nodes", "Parameters ---------- partition : dict the partition of the nodes, i.e a dictionary", "If the partition is not a partition of all graph nodes ValueError If", "dict the partition of the nodes, i.e a dictionary where keys are their", "node: inc[com] = inc.get(com, 0.) + float(edge_weight) else: inc[com] = inc.get(com, 0.) +", ": str, optional the key in graph to use as weight. Default to", "== node: inc[com] = inc.get(com, 0.) + float(edge_weight) else: inc[com] = inc.get(com, 0.)", "graph, weight='weight'): \"\"\"Compute the modularity of a partition of a graph Parameters ----------", "inc[com] = inc.get(com, 0.) + float(edge_weight) / 2. res = 0. for com", "to use as weight. Default to 'weight' Returns ------- modularity : float The", "partition : dict the partition of the nodes, i.e a dictionary where keys", "= partition[node] deg[com] = deg.get(com, 0.) + graph.degree(node, weight=weight) for neighbor, datas in", "if links == 0: raise ValueError(\"A graph without link has an undefined modularity\")", "ValueError If the graph has no link TypeError If graph is not a", "modularity : float The modularity Raises ------ KeyError If the partition is not", "inc.get(com, 0.) - \\ ((deg.get(com, 0.) ** 2) / (4. * links)) return", "networks. Physical Review E 69, 26113(2004). Examples -------- >>> G=nx.erdos_renyi_graph(100, 0.01) >>> part", "authors ... I only used a span of his code :) __author__ =", "dict([]) links = graph.size(weight=weight) if links == 0: raise ValueError(\"A graph without link", "---------- partition : dict the partition of the nodes, i.e a dictionary where", "= best_partition(G) >>> modularity(part, G) if type(graph) != nx.Graph: raise TypeError(\"Bad graph type,", "deg.get(com, 0.) + graph.degree(node, weight=weight) for neighbor, datas in graph[node].items(): edge_weight = datas.get(weight,", "+ graph.degree(node, weight=weight) for neighbor, datas in graph[node].items(): edge_weight = datas.get(weight, 1) if", "0.01) >>> part = best_partition(G) >>> modularity(part, G) if type(graph) != nx.Graph: raise", "use as weight. Default to 'weight' Returns ------- modularity : float The modularity", "Newman, M.E.J. & <NAME>. Finding and evaluating community structure in networks. Physical Review", "reserved to the authors ... I only used a span of his code", "else: inc[com] = inc.get(com, 0.) + float(edge_weight) / 2. res = 0. for", "neighbor, datas in graph[node].items(): edge_weight = datas.get(weight, 1) if partition[neighbor] == com: if", "decomposed weight : str, optional the key in graph to use as weight.", ">>> G=nx.erdos_renyi_graph(100, 0.01) >>> part = best_partition(G) >>> modularity(part, G) if type(graph) !=", "graph type, use only non directed graph\") \"\"\" inc = dict([]) deg =", "'weight' Returns ------- modularity : float The modularity Raises ------ KeyError If the", "networkx graph which is decomposed weight : str, optional the key in graph", "is decomposed weight : str, optional the key in graph to use as", "!= nx.Graph: raise TypeError(\"Bad graph type, use only non directed graph\") \"\"\" inc", "networkx as nx # All rights are reserved to the authors ... I", "type, use only non directed graph\") \"\"\" inc = dict([]) deg = dict([])", "- \\ ((deg.get(com, 0.) ** 2) / (4. 
* links)) return (1.0 /", "-------- >>> G=nx.erdos_renyi_graph(100, 0.01) >>> part = best_partition(G) >>> modularity(part, G) if type(graph)", "dictionary where keys are their nodes and values the communities graph : networkx.Graph", "graph.degree(node, weight=weight) for neighbor, datas in graph[node].items(): edge_weight = datas.get(weight, 1) if partition[neighbor]", "an undefined modularity\") for node in graph: com = partition[node] deg[com] = deg.get(com,", "modularity of a partition of a graph Parameters ---------- partition : dict the", "rights are reserved to the authors ... I only used a span of", "span of his code :) __author__ = \"\"\"<NAME> (<EMAIL>)\"\"\" def modularity(partition, graph, weight='weight'):", "not a partition of all graph nodes ValueError If the graph has no", "the graph has no link TypeError If graph is not a networkx.Graph References", "69, 26113(2004). Examples -------- >>> G=nx.erdos_renyi_graph(100, 0.01) >>> part = best_partition(G) >>> modularity(part,", "1) if partition[neighbor] == com: if neighbor == node: inc[com] = inc.get(com, 0.)", "Finding and evaluating community structure in networks. Physical Review E 69, 26113(2004). Examples", "TypeError If graph is not a networkx.Graph References ---------- .. 1. Newman, M.E.J.", "TypeError(\"Bad graph type, use only non directed graph\") \"\"\" inc = dict([]) deg", "graph which is decomposed weight : str, optional the key in graph to", ": float The modularity Raises ------ KeyError If the partition is not a", "the partition is not a partition of all graph nodes ValueError If the", "weight. Default to 'weight' Returns ------- modularity : float The modularity Raises ------", "All rights are reserved to the authors ... I only used a span", "graph is not a networkx.Graph References ---------- .. 1. Newman, M.E.J. & <NAME>.", "links == 0: raise ValueError(\"A graph without link has an undefined modularity\") for", "if partition[neighbor] == com: if neighbor == node: inc[com] = inc.get(com, 0.) +", "res += inc.get(com, 0.) - \\ ((deg.get(com, 0.) ** 2) / (4. *", "only non directed graph\") \"\"\" inc = dict([]) deg = dict([]) links =", "partition is not a partition of all graph nodes ValueError If the graph", "weight=weight) for neighbor, datas in graph[node].items(): edge_weight = datas.get(weight, 1) if partition[neighbor] ==", "to the authors ... I only used a span of his code :)", "of the nodes, i.e a dictionary where keys are their nodes and values", "0. for com in set(partition.values()): res += inc.get(com, 0.) - \\ ((deg.get(com, 0.)", "link has an undefined modularity\") for node in graph: com = partition[node] deg[com]", "the networkx graph which is decomposed weight : str, optional the key in", "0.) ** 2) / (4. * links)) return (1.0 / links) * res", "= dict([]) deg = dict([]) links = graph.size(weight=weight) if links == 0: raise", "0.) + graph.degree(node, weight=weight) for neighbor, datas in graph[node].items(): edge_weight = datas.get(weight, 1)", "... I only used a span of his code :) __author__ = \"\"\"<NAME>", "keys are their nodes and values the communities graph : networkx.Graph the networkx", "---------- .. 1. Newman, M.E.J. & <NAME>. Finding and evaluating community structure in", "in graph to use as weight. Default to 'weight' Returns ------- modularity :", ":) __author__ = \"\"\"<NAME> (<EMAIL>)\"\"\" def modularity(partition, graph, weight='weight'): \"\"\"Compute the modularity of", "set(partition.values()): res += inc.get(com, 0.) - \\ ((deg.get(com, 0.) 
** 2) / (4.", "part = best_partition(G) >>> modularity(part, G) if type(graph) != nx.Graph: raise TypeError(\"Bad graph", "undefined modularity\") for node in graph: com = partition[node] deg[com] = deg.get(com, 0.)", "\\ ((deg.get(com, 0.) ** 2) / (4. * links)) return (1.0 / links)", "float The modularity Raises ------ KeyError If the partition is not a partition", "best_partition(G) >>> modularity(part, G) if type(graph) != nx.Graph: raise TypeError(\"Bad graph type, use", "Raises ------ KeyError If the partition is not a partition of all graph", "com = partition[node] deg[com] = deg.get(com, 0.) + graph.degree(node, weight=weight) for neighbor, datas", "modularity(part, G) if type(graph) != nx.Graph: raise TypeError(\"Bad graph type, use only non", "nx # All rights are reserved to the authors ... I only used", "import networkx as nx # All rights are reserved to the authors ...", "0.) - \\ ((deg.get(com, 0.) ** 2) / (4. * links)) return (1.0", "graph : networkx.Graph the networkx graph which is decomposed weight : str, optional", "is not a networkx.Graph References ---------- .. 1. Newman, M.E.J. & <NAME>. Finding", "ValueError(\"A graph without link has an undefined modularity\") for node in graph: com", "no link TypeError If graph is not a networkx.Graph References ---------- .. 1.", "to 'weight' Returns ------- modularity : float The modularity Raises ------ KeyError If", "Review E 69, 26113(2004). Examples -------- >>> G=nx.erdos_renyi_graph(100, 0.01) >>> part = best_partition(G)", "+ float(edge_weight) else: inc[com] = inc.get(com, 0.) + float(edge_weight) / 2. res =", "G) if type(graph) != nx.Graph: raise TypeError(\"Bad graph type, use only non directed", ": networkx.Graph the networkx graph which is decomposed weight : str, optional the", "== 0: raise ValueError(\"A graph without link has an undefined modularity\") for node", "\"\"\"<NAME> (<EMAIL>)\"\"\" def modularity(partition, graph, weight='weight'): \"\"\"Compute the modularity of a partition of", "structure in networks. Physical Review E 69, 26113(2004). Examples -------- >>> G=nx.erdos_renyi_graph(100, 0.01)", "his code :) __author__ = \"\"\"<NAME> (<EMAIL>)\"\"\" def modularity(partition, graph, weight='weight'): \"\"\"Compute the", "\"\"\"Compute the modularity of a partition of a graph Parameters ---------- partition :", "of all graph nodes ValueError If the graph has no link TypeError If", "only used a span of his code :) __author__ = \"\"\"<NAME> (<EMAIL>)\"\"\" def", "a dictionary where keys are their nodes and values the communities graph :", "datas in graph[node].items(): edge_weight = datas.get(weight, 1) if partition[neighbor] == com: if neighbor", "not a networkx.Graph References ---------- .. 1. Newman, M.E.J. & <NAME>. Finding and", "Examples -------- >>> G=nx.erdos_renyi_graph(100, 0.01) >>> part = best_partition(G) >>> modularity(part, G) if", "raise ValueError(\"A graph without link has an undefined modularity\") for node in graph:", "/ 2. res = 0. for com in set(partition.values()): res += inc.get(com, 0.)", "a partition of all graph nodes ValueError If the graph has no link", "If the graph has no link TypeError If graph is not a networkx.Graph", "Physical Review E 69, 26113(2004). Examples -------- >>> G=nx.erdos_renyi_graph(100, 0.01) >>> part =", "non directed graph\") \"\"\" inc = dict([]) deg = dict([]) links = graph.size(weight=weight)", "res = 0. for com in set(partition.values()): res += inc.get(com, 0.) 
- \\", "= graph.size(weight=weight) if links == 0: raise ValueError(\"A graph without link has an", "= datas.get(weight, 1) if partition[neighbor] == com: if neighbor == node: inc[com] =", "I only used a span of his code :) __author__ = \"\"\"<NAME> (<EMAIL>)\"\"\"", "networkx.Graph the networkx graph which is decomposed weight : str, optional the key", "in graph[node].items(): edge_weight = datas.get(weight, 1) if partition[neighbor] == com: if neighbor ==", "float(edge_weight) else: inc[com] = inc.get(com, 0.) + float(edge_weight) / 2. res = 0.", "use only non directed graph\") \"\"\" inc = dict([]) deg = dict([]) links", "partition of a graph Parameters ---------- partition : dict the partition of the", "the nodes, i.e a dictionary where keys are their nodes and values the", "without link has an undefined modularity\") for node in graph: com = partition[node]", "values the communities graph : networkx.Graph the networkx graph which is decomposed weight", "edge_weight = datas.get(weight, 1) if partition[neighbor] == com: if neighbor == node: inc[com]", "((deg.get(com, 0.) ** 2) / (4. * links)) return (1.0 / links) *", "== com: if neighbor == node: inc[com] = inc.get(com, 0.) + float(edge_weight) else:", "has an undefined modularity\") for node in graph: com = partition[node] deg[com] =", "0.) + float(edge_weight) else: inc[com] = inc.get(com, 0.) + float(edge_weight) / 2. res", "References ---------- .. 1. Newman, M.E.J. & <NAME>. Finding and evaluating community structure", "of a partition of a graph Parameters ---------- partition : dict the partition", "the key in graph to use as weight. Default to 'weight' Returns -------", "The modularity Raises ------ KeyError If the partition is not a partition of", "nodes and values the communities graph : networkx.Graph the networkx graph which is", "datas.get(weight, 1) if partition[neighbor] == com: if neighbor == node: inc[com] = inc.get(com,", "i.e a dictionary where keys are their nodes and values the communities graph", "key in graph to use as weight. Default to 'weight' Returns ------- modularity", "partition of the nodes, i.e a dictionary where keys are their nodes and", "their nodes and values the communities graph : networkx.Graph the networkx graph which", "and values the communities graph : networkx.Graph the networkx graph which is decomposed", "has no link TypeError If graph is not a networkx.Graph References ---------- ..", "__author__ = \"\"\"<NAME> (<EMAIL>)\"\"\" def modularity(partition, graph, weight='weight'): \"\"\"Compute the modularity of a", "------ KeyError If the partition is not a partition of all graph nodes", "as nx # All rights are reserved to the authors ... I only", "dict([]) deg = dict([]) links = graph.size(weight=weight) if links == 0: raise ValueError(\"A", "E 69, 26113(2004). Examples -------- >>> G=nx.erdos_renyi_graph(100, 0.01) >>> part = best_partition(G) >>>", "raise TypeError(\"Bad graph type, use only non directed graph\") \"\"\" inc = dict([])", "in graph: com = partition[node] deg[com] = deg.get(com, 0.) + graph.degree(node, weight=weight) for", "If graph is not a networkx.Graph References ---------- .. 1. Newman, M.E.J. &", "graph\") \"\"\" inc = dict([]) deg = dict([]) links = graph.size(weight=weight) if links", "deg[com] = deg.get(com, 0.) 
+ graph.degree(node, weight=weight) for neighbor, datas in graph[node].items(): edge_weight", "links = graph.size(weight=weight) if links == 0: raise ValueError(\"A graph without link has", "graph Parameters ---------- partition : dict the partition of the nodes, i.e a", "used a span of his code :) __author__ = \"\"\"<NAME> (<EMAIL>)\"\"\" def modularity(partition,", "the communities graph : networkx.Graph the networkx graph which is decomposed weight :", "nx.Graph: raise TypeError(\"Bad graph type, use only non directed graph\") \"\"\" inc =", ">>> modularity(part, G) if type(graph) != nx.Graph: raise TypeError(\"Bad graph type, use only", "for node in graph: com = partition[node] deg[com] = deg.get(com, 0.) + graph.degree(node,", "graph to use as weight. Default to 'weight' Returns ------- modularity : float", "inc.get(com, 0.) + float(edge_weight) else: inc[com] = inc.get(com, 0.) + float(edge_weight) / 2.", "com in set(partition.values()): res += inc.get(com, 0.) - \\ ((deg.get(com, 0.) ** 2)", "(<EMAIL>)\"\"\" def modularity(partition, graph, weight='weight'): \"\"\"Compute the modularity of a partition of a", "the modularity of a partition of a graph Parameters ---------- partition : dict", "link TypeError If graph is not a networkx.Graph References ---------- .. 1. Newman,", "= 0. for com in set(partition.values()): res += inc.get(com, 0.) - \\ ((deg.get(com,", "are reserved to the authors ... I only used a span of his", "as weight. Default to 'weight' Returns ------- modularity : float The modularity Raises", "deg = dict([]) links = graph.size(weight=weight) if links == 0: raise ValueError(\"A graph", "where keys are their nodes and values the communities graph : networkx.Graph the", "graph.size(weight=weight) if links == 0: raise ValueError(\"A graph without link has an undefined", "Default to 'weight' Returns ------- modularity : float The modularity Raises ------ KeyError", "M.E.J. & <NAME>. Finding and evaluating community structure in networks. Physical Review E", "neighbor == node: inc[com] = inc.get(com, 0.) + float(edge_weight) else: inc[com] = inc.get(com,", "def modularity(partition, graph, weight='weight'): \"\"\"Compute the modularity of a partition of a graph", "optional the key in graph to use as weight. Default to 'weight' Returns", "evaluating community structure in networks. Physical Review E 69, 26113(2004). Examples -------- >>>", ">>> part = best_partition(G) >>> modularity(part, G) if type(graph) != nx.Graph: raise TypeError(\"Bad", "type(graph) != nx.Graph: raise TypeError(\"Bad graph type, use only non directed graph\") \"\"\"", "community structure in networks. Physical Review E 69, 26113(2004). Examples -------- >>> G=nx.erdos_renyi_graph(100,", "<reponame>tchimih/NSD_project import networkx as nx # All rights are reserved to the authors", "nodes ValueError If the graph has no link TypeError If graph is not", "nodes, i.e a dictionary where keys are their nodes and values the communities", "partition[neighbor] == com: if neighbor == node: inc[com] = inc.get(com, 0.) + float(edge_weight)", "is not a partition of all graph nodes ValueError If the graph has", "inc = dict([]) deg = dict([]) links = graph.size(weight=weight) if links == 0:", "Returns ------- modularity : float The modularity Raises ------ KeyError If the partition", "+ float(edge_weight) / 2. res = 0. 
for com in set(partition.values()): res +=", "of his code :) __author__ = \"\"\"<NAME> (<EMAIL>)\"\"\" def modularity(partition, graph, weight='weight'): \"\"\"Compute", "modularity(partition, graph, weight='weight'): \"\"\"Compute the modularity of a partition of a graph Parameters", "in networks. Physical Review E 69, 26113(2004). Examples -------- >>> G=nx.erdos_renyi_graph(100, 0.01) >>>", "float(edge_weight) / 2. res = 0. for com in set(partition.values()): res += inc.get(com,", "directed graph\") \"\"\" inc = dict([]) deg = dict([]) links = graph.size(weight=weight) if", "= deg.get(com, 0.) + graph.degree(node, weight=weight) for neighbor, datas in graph[node].items(): edge_weight =", "inc.get(com, 0.) + float(edge_weight) / 2. res = 0. for com in set(partition.values()):", "= \"\"\"<NAME> (<EMAIL>)\"\"\" def modularity(partition, graph, weight='weight'): \"\"\"Compute the modularity of a partition", "weight='weight'): \"\"\"Compute the modularity of a partition of a graph Parameters ---------- partition", "G=nx.erdos_renyi_graph(100, 0.01) >>> part = best_partition(G) >>> modularity(part, G) if type(graph) != nx.Graph:", "graph without link has an undefined modularity\") for node in graph: com =", ".. 1. Newman, M.E.J. & <NAME>. Finding and evaluating community structure in networks.", "graph: com = partition[node] deg[com] = deg.get(com, 0.) + graph.degree(node, weight=weight) for neighbor,", "for neighbor, datas in graph[node].items(): edge_weight = datas.get(weight, 1) if partition[neighbor] == com:", "inc[com] = inc.get(com, 0.) + float(edge_weight) else: inc[com] = inc.get(com, 0.) + float(edge_weight)", "a graph Parameters ---------- partition : dict the partition of the nodes, i.e", "weight : str, optional the key in graph to use as weight. Default", ": dict the partition of the nodes, i.e a dictionary where keys are", "& <NAME>. Finding and evaluating community structure in networks. Physical Review E 69,", "0.) + float(edge_weight) / 2. res = 0. for com in set(partition.values()): res", "0: raise ValueError(\"A graph without link has an undefined modularity\") for node in", "a span of his code :) __author__ = \"\"\"<NAME> (<EMAIL>)\"\"\" def modularity(partition, graph," ]
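
# --- Usage sketch (illustration, not part of the original module) ---
# `best_partition` from the docstring example lives in the full python-louvain
# package and is not defined here, so we hand-build a two-community partition.
if __name__ == '__main__':
    demo_graph = nx.barbell_graph(5, 0)  # two 5-cliques joined by a single edge
    demo_partition = {node: 0 if node < 5 else 1 for node in demo_graph}
    print(modularity(demo_partition, demo_graph))  # ~0.45 for this graph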
[ "k, v in sorted(d.items()): if isinstance(v, dict): print('\\t'*level + k+':') print_dict(v, level+1, list_on_levels)", "'norm': normalize }, { 'data': stiffs, 'title': 'Stiffness', 'out_file': os.path.join(results_folder, 'stiffness', 'stiff_dist_sim_{}_{}.pdf'.format(seed, num_sim)),", "#') print_dict(config) print('# ' + '='*bar_num + ' #') def print_header_contr_evo(config, bar_num=50): print('#", "* 8 + 'EXPERIMENT CONFIGURATION') print('# ' + '=' * bar_num + '", "for evaluation only solutions # that have not been already evaluated skip =", "\"\"\" Load simulation history from provided file. In case no file is given,", "# ================================== # def load_history(history_file): \"\"\" Load simulation history from provided file. In", "import pickle import sqlite3 import numpy as np import matplotlib # select matplotlib", "bar_num + ' #') print_dict(config, list_on_levels=True) print('# ' + '=' * bar_num +", "into a local history to support MAP creation functionality if local_history is not", "matplotlib.rcParams[\"axes.titlepad\"] = 15 matplotlib.rcParams['figure.dpi'] = 300 # create potential needed directory where to", "maps simulator input strings into corresponding computed fitness. :param local_history: :param coev: :param", "fits, n_mods, stiffs = list(zip(*[(ind.get_fitness(), ind.num_modules, ind.stiffness) for ind in pop])) # plot", "1.1) plt.savefig(conf['out_file'], bbox_inches='tight') plt.close() def parse_robot_string(rb_string): robot = [] for module_str in rb_string.split('--')[:-1]:", "' ' * 8 + 'OF TENSEGRITY SOFT ROBOTS\\n\\t' + (' ' *", "print_dict(config, list_on_levels=False) print('# ' + '=' * bar_num + ' #') def store_checkpoint(checkpoint,", "' #') print('\\t\\tEVOLUTIONARY SIMULATION\\n\\t' + ' OF TENSEGRITY SOFT ROBOTS CONTROLLERS \\n\\t' +", "fitness_values = np.asarray(list(map(lambda i: i.fitness.values, population))) if coev: fitness_values = [1.0 / f", "robot population :param pop: population of robot individuals :param results_folder: folder where to", "matplotlib backend matplotlib.use('pdf') import matplotlib.pyplot as plt from params_conf import N_MODULES, STIFF_TABLE #", "store_history(history, history_file): \"\"\" Store simulation history in provided file :param history: :param history_file:", "are currently not used. def load_history_db(history_db): \"\"\" Load simulation history from provided db.", "================================== # def load_history(history_file): \"\"\" Load simulation history from provided file. In case", "SIMULATION\\n\\t' + ' '*7 + 'OF TENSEGRITY SOFT ROBOTS') print('# ' + '='*bar_num", "#') print_dict(config, list_on_levels=True) print('# ' + '=' * bar_num + ' #') def", "* 7 + 'OF MORPHOLOGY AND CONTROLLER\\n\\t' + ' ' * 8 +", "in sorted(inds_vals.items(), key=lambda r: float(r[0]))])) if conf['norm']: y = np.asarray(y)/np.sum(y) ax.bar(x, y, color=colors)", "zip(to_evaluate, fitnesses): ind.fitness.values = fit ind_string = ind.string_input() # update history records with", "below are currently not used. 
def load_history_db(history_db): \"\"\" Load simulation history from provided", "' #\\n') print('# ' + '=' * bar_num + ' #') print('\\t' +", "bar_num + ' #') print('\\t' + ' ' * 8 + 'EXPERIMENT CONFIGURATION')", "sorted(inds_vals.items(), key=lambda r: float(r[0]))])) if conf['norm']: y = np.asarray(y)/np.sum(y) ax.bar(x, y, color=colors) else:", "the properties distribution of last generation configs = [ { 'data': fits, 'title':", "'amplitude': float(params[4].strip()), 'phase': float(params[5].strip()), 'rot': float(params[6].strip()), 'stiff': float(params[7].strip()) } robot.append(module) return robot #", "support MAP creation functionality if local_history is not None: local_history[ind_string] = glob_history[ind_string] skip.append(ind)", "json import pickle import sqlite3 import numpy as np import matplotlib # select", "verbose: if pbar is not None: pbar.set_postfix({ 'avg': pop_stats['avg_fitness'], 'std': pop_stats['std_dev'], 'min': pop_stats['min'],", "* 8 + 'OF TENSEGRITY SOFT ROBOTS\\n\\t' + (' ' * 2 +", "'Source Sans Pro', 'size': 12, 'weight': 'light'} matplotlib.rc('font', **font) matplotlib.rcParams[\"axes.titlepad\"] = 15 matplotlib.rcParams['figure.dpi']", "history[robot_string.strip()] = (float(fitness),) return history def store_history(history, history_file): \"\"\" Store simulation history in", "= {'family': 'Source Sans Pro', 'size': 12, 'weight': 'light'} matplotlib.rc('font', **font) matplotlib.rcParams[\"axes.titlepad\"] =", ":return: \"\"\" font = {'family': 'Source Sans Pro', 'size': 12, 'weight': 'light'} matplotlib.rc('font',", "for ind in invalid_ind: ind_string = ind.string_input() if not coev and ind_string in", "'FOR GOAL REACHING TASK') + '\\n\\t' + ' ' * 7 + '(SINGLE", "header history_in.readline() for line in history_in: robot_string, fitness = line.strip().split(',') history[robot_string.strip()] = (float(fitness),)", "'weight': 'light'} matplotlib.rc('font', **font) matplotlib.rcParams[\"axes.titlepad\"] = 15 matplotlib.rcParams['figure.dpi'] = 300 # create potential", "the values of given property across :param conf: :return: \"\"\" font = {'family':", "CONTROLLER\\n\\t' + ' ' * 8 + 'OF TENSEGRITY SOFT ROBOTS\\n\\t' + ('", "of last generation configs = [ { 'data': fits, 'title': 'Fitness', 'h_range': (0,", "returns an empty history :param history_db: :return: \"\"\" history = {} if history_db", "robot_string, fitness = line.strip().split(',') history[robot_string.strip()] = (float(fitness),) return history def store_history(history, history_file): \"\"\"", "'EXPERIMENT CONFIGURATION') print('# ' + '=' * bar_num + ' #') print_dict(config, list_on_levels=True)", "+ '{}: {}'.format(k, v)) def print_header(config, bar_num=50): print('# ' + '='*bar_num + '", "and k == 'modules_conf': print('\\t' * level + '{}: ['.format(k)) for element in", "{}'.format(k, v)) def print_header(config, bar_num=50): print('# ' + '='*bar_num + ' #') print('\\t\\tEVOLUTIONARY", "sv in conf['bins']} for ind_stiff in conf['data']: inds_vals[str(ind_stiff)] += 1 x, y =", "ind.fitness.values = fit ind_string = ind.string_input() # update history records with latest fitness", "conf: :return: \"\"\" font = {'family': 'Source Sans Pro', 'size': 12, 'weight': 'light'}", "MAP creation functionality if local_history is not None: local_history[ind_string] = glob_history[ind_string] skip.append(ind) else:", "config['simulation_path'] else ' ' * 10 + 'FOR GOAL REACHING TASK') ) print('#", "in history_in: robot_string, fitness = line.strip().split(',') history[robot_string.strip()] = 
(float(fitness),) return history def store_history(history,", "cursor.close() conn.close() return history def store_history_db(history, history_db): \"\"\" Store simulation history in provided", "'FOR GOAL REACHING TASK') + '\\n\\t' + ' ' * 7 + '(DOUBLE", "'max': pop_stats['max'], 'skip': skips }) else: print('num_sims: {} | Fitness -> avg: {}", "history def store_history(history, history_file): \"\"\" Store simulation history in provided file :param history:", "record_info(logbook, stats, gen, pop, inv_ind): if stats is not None: record = stats.compile(pop)", "to_evaluate + skip, len(skip), n_evaluations def plot_population_stats(pop, results_folder, seed, num_sim, normalize=False): \"\"\" Plot", "MAP creation functionality if local_history is not None: local_history[ind_string] = fit if not", "k == 'modules_conf': print('\\t' * level + '{}: ['.format(k)) for element in v:", "min: {} max: {}'.format( pop_stats['num_sims'], pop_stats['avg_fitness'], pop_stats['std_dev'], pop_stats['min'], pop_stats['max'] )) def evaluate_ind(toolbox, individuals,", "= toolbox.map(toolbox.evaluate, to_evaluate) n_evaluations = len(fitnesses) for ind, fit in zip(to_evaluate, fitnesses): ind.fitness.values", "functions # # ================================== # def print_dict(d, level=0, list_on_levels=False): for k, v in", "NOTE: these functions below are currently not used. def load_history_db(history_db): \"\"\" Load simulation", "the same configuration ind.fitness.values = glob_history[ind_string] # store also record into a local", "in pop])) # plot the properties distribution of last generation configs = [", "#') print_dict(config, list_on_levels=False) print('# ' + '=' * bar_num + ' #') def", "else ' ' * 10 + 'FOR GOAL REACHING TASK') ) print('# '", "file.write(json.dumps({ **pop_stats, 'population': [individual.info(coev=coev) for individual in population] })) if verbose: if pbar", "+ '(SINGLE MAP-ELITES VARIANT)' ) print('# ' + '=' * bar_num + '", "not None: conn = sqlite3.connect(history_db) cursor = conn.cursor() for robot_string, fitness in cursor.execute('SELECT", "' #') def print_header_coev(config, bar_num=55): print('# ' + '=' * bar_num + '", "' #') print_dict(config, list_on_levels=False) print('# ' + '=' * bar_num + ' #')", "print('# ' + '='*bar_num + ' #') print('\\t\\tEVOLUTIONARY SIMULATION\\n\\t' + ' '*7 +", "history[robot_string] = (float(fitness),) cursor.close() conn.close() return history def store_history_db(history, history_db): \"\"\" Store simulation", "used. def load_history_db(history_db): \"\"\" Load simulation history from provided db. 
In case no", "float(params[4].strip()), 'phase': float(params[5].strip()), 'rot': float(params[6].strip()), 'stiff': float(params[7].strip()) } robot.append(module) return robot # ==================================", "' ' * 10 + 'FOR GOAL REACHING TASK') ) print('# ' +", "not coev: n_evaluations = len(individuals) return to_evaluate + skip, len(skip), n_evaluations def plot_population_stats(pop,", "def print_header_double_map(config, bar_num=55): print('# ' + '=' * bar_num + ' #') print('\\t\\t", "'n_modules', 'num_mods_dist_sim_{}_{}.pdf'.format(seed, num_sim)), 'bins': N_MODULES, 'discrete_hist': True, 'norm': normalize }, { 'data': stiffs,", "bar_num + ' #') print('\\t\\tEVOLUTIONARY SIMULATION\\n\\t' + ' OF TENSEGRITY SOFT ROBOTS CONTROLLERS", "current population values file.write(json.dumps({ **pop_stats, 'population': [individual.info(coev=coev) for individual in population] })) if", "bar_num=50): print('# ' + '='*bar_num + ' #') print('\\t\\tEVOLUTIONARY SIMULATION\\n\\t' + ' '*7", "fitness REAL NOT NULL)''') cursor.executemany('''REPLACE INTO history(robot_string, fitness) VALUES (?)''', history_gen()) cursor.close() conn.close()", "\"\"\" # split robots interested properties into three lists fits, n_mods, stiffs =", "population] })) if verbose: if pbar is not None: pbar.set_postfix({ 'avg': pop_stats['avg_fitness'], 'std':", "ax.bar(x, y, color=colors) else: _, bins, patches = ax.hist(conf['data'], bins=conf['bins'], density=conf['norm'], range=conf['h_range']) ax.set_xticks(bins)", "def plot_population_dist(conf): \"\"\" Plot the distribution of the values of given property across", "pickle.dump(checkpoint, cp_file) def record_info(logbook, stats, gen, pop, inv_ind): if stats is not None:", "> 0: fitnesses = toolbox.map(toolbox.evaluate, to_evaluate) n_evaluations = len(fitnesses) for ind, fit in", "also record into a local history to support MAP creation functionality if local_history", "Plot the fitness/num_modules/stiffness distribution across given robot population :param pop: population of robot", "+ '\\n\\t' + ' ' * 7 + '(DOUBLE MAP-ELITES VARIANT)' ) print('#", "the distribution of the values of given property across :param conf: :return: \"\"\"", "# NOTE: these functions below are currently not used. 
def load_history_db(history_db): \"\"\" Load", "bins=conf['bins'], density=conf['norm'], range=conf['h_range']) ax.set_xticks(bins) for i, (c, p) in enumerate(zip(bins, patches)): plt.setp(p, 'facecolor',", "================================== # def print_dict(d, level=0, list_on_levels=False): for k, v in sorted(d.items()): if isinstance(v,", "cp_file: pickle.dump(checkpoint, cp_file) def record_info(logbook, stats, gen, pop, inv_ind): if stats is not", "is not None and os.path.exists(history_file): with open(history_file) as history_in: # skip header history_in.readline()", "* 7 + 'EXPERIMENT CONFIGURATION') print('# ' + '=' * bar_num + '", "# Utils functions # # ================================== # def print_dict(d, level=0, list_on_levels=False): for k,", "+ 'OF TENSEGRITY SOFT ROBOTS\\n\\t' + (' ' * 2 + 'FOR GOAL", "'order': int(params[0].strip()), 'connectedModules': int(params[1].strip()), 'connectedFaces': int(params[2].strip()), 'freq': float(params[3].strip()), 'amplitude': float(params[4].strip()), 'phase': float(params[5].strip()), 'rot':", "y = np.asarray(y)/np.sum(y) ax.bar(x, y, color=colors) else: _, bins, patches = ax.hist(conf['data'], bins=conf['bins'],", "list(zip(*[(ind.get_fitness(), ind.num_modules, ind.stiffness) for ind in pop])) # plot the properties distribution of", "len(skip), n_evaluations def plot_population_stats(pop, results_folder, seed, num_sim, normalize=False): \"\"\" Plot the fitness/num_modules/stiffness distribution", "#') print('\\t' + ' '*7 + 'EXPERIMENT CONFIGURATION') print('# ' + '='*bar_num +", "* level + '{}: ['.format(k)) for element in v: print('\\t' * (level+1) +", "len(conf['bins']) if isinstance(conf['bins'], list) else conf['bins'] colors = plt.cm.viridis(np.linspace(0, 1, num_colors)) fig =", "else: _, bins, patches = ax.hist(conf['data'], bins=conf['bins'], density=conf['norm'], range=conf['h_range']) ax.set_xticks(bins) for i, (c,", "whether to reports the plots in a normalized manner (default: False) :return: \"\"\"", "len(to_evaluate) > 0: fitnesses = toolbox.map(toolbox.evaluate, to_evaluate) n_evaluations = len(fitnesses) for ind, fit", "float(params[6].strip()), 'stiff': float(params[7].strip()) } robot.append(module) return robot # ================================== # # SIM HISTORY", "Individuals') if conf['norm']: ax.set_ylim(0, 1.1) plt.savefig(conf['out_file'], bbox_inches='tight') plt.close() def parse_robot_string(rb_string): robot = []", "in zip(to_evaluate, fitnesses): ind.fitness.values = fit ind_string = ind.string_input() # update history records", "backend matplotlib.use('pdf') import matplotlib.pyplot as plt from params_conf import N_MODULES, STIFF_TABLE # ==================================", "+ '='*bar_num + ' #') print('\\t' + ' '*7 + 'EXPERIMENT CONFIGURATION') print('#", "'phase': float(params[5].strip()), 'rot': float(params[6].strip()), 'stiff': float(params[7].strip()) } robot.append(module) return robot # ================================== #", "'EXPERIMENT CONFIGURATION') print('# ' + '='*bar_num + ' #') print_dict(config) print('# ' +", "print_header_double_map(config, bar_num=55): print('# ' + '=' * bar_num + ' #') print('\\t\\t EVOLUTIONARY", "1, num_colors)) fig = plt.figure(figsize=(12, 5)) ax = fig.gca() if conf['discrete_hist']: inds_vals =", "num_colors = len(conf['bins']) if isinstance(conf['bins'], list) else conf['bins'] colors = plt.cm.viridis(np.linspace(0, 1, num_colors))", "+ '=' * bar_num + ' #') def print_header_coev(config, bar_num=55): print('# ' +", "#') print('\\t' + ' ' * 8 + 'EXPERIMENT 
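
# Small illustration with a hypothetical config dict (not from the original
# repo): keys are printed sorted alphabetically, one tab per nesting level.
if __name__ == '__main__':
    print_dict({'evo': {'pop_size': 50, 'gens': 100}, 'seed': 42})
    # evo:
    #     gens: 100
    #     pop_size: 50
    # seed: 42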

def print_header(config, bar_num=50):
    print('# ' + '='*bar_num + ' #')
    print('\t\tEVOLUTIONARY SIMULATION\n\t' + ' '*7 + 'OF TENSEGRITY SOFT ROBOTS')
    print('# ' + '='*bar_num + ' #\n')
    print('# ' + '='*bar_num + ' #')
    print('\t' + ' '*7 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '='*bar_num + ' #')
    print_dict(config)
    print('# ' + '='*bar_num + ' #')


def print_header_contr_evo(config, bar_num=50):
    print('# ' + '=' * bar_num + ' #')
    print('\t\tEVOLUTIONARY SIMULATION\n\t' +
          ' OF TENSEGRITY SOFT ROBOTS CONTROLLERS \n\t' +
          ('FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
           else ' ' * 8 + 'FOR GOAL REACHING TASK'))
    print('# ' + '=' * bar_num + ' #\n')
    print('# ' + '=' * bar_num + ' #')
    print('\t' + ' ' * 8 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '=' * bar_num + ' #')
    print_dict(config, list_on_levels=True)
    print('# ' + '=' * bar_num + ' #')


def print_header_coev(config, bar_num=55):
    print('# ' + '=' * bar_num + ' #')
    print('\t\tCO-EVOLUTIONARY SIMULATION\n\t' +
          ' ' * 7 + 'OF MORPHOLOGY AND CONTROLLER\n\t' +
          ' ' * 8 + 'OF TENSEGRITY SOFT ROBOTS\n\t' +
          (' ' * 2 + 'FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
           else ' ' * 10 + 'FOR GOAL REACHING TASK'))
    print('# ' + '=' * bar_num + ' #\n')
    print('# ' + '=' * bar_num + ' #')
    print('\t' + ' ' * 8 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '=' * bar_num + ' #')
    print_dict(config, list_on_levels=False)
    print('# ' + '=' * bar_num + ' #')


def print_header_double_map(config, bar_num=55):
    print('# ' + '=' * bar_num + ' #')
    print('\t\t EVOLUTIONARY SIMULATION\n\t' +
          ' ' * 7 + 'OF MORPHOLOGY AND CONTROLLER\n\t' +
          ' ' * 8 + 'OF TENSEGRITY SOFT ROBOTS\n\t' +
          (' ' * 2 + 'FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
           else ' ' * 10 + 'FOR GOAL REACHING TASK') +
          '\n\t' + ' ' * 7 + '(DOUBLE MAP-ELITES VARIANT)')
    print('# ' + '=' * bar_num + ' #\n')
    print('# ' + '=' * bar_num + ' #')
    print('\t' + ' ' * 8 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '=' * bar_num + ' #')
    print_dict(config, list_on_levels=False)
    print('# ' + '=' * bar_num + ' #')


def print_header_single_map(config, bar_num=55):
    print('# ' + '=' * bar_num + ' #')
    print('\t\t EVOLUTIONARY SIMULATION\n\t' +
          ' ' * 7 + 'OF MORPHOLOGY AND CONTROLLER\n\t' +
          ' ' * 8 + 'OF TENSEGRITY SOFT ROBOTS\n\t' +
          (' ' * 2 + 'FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
           else ' ' * 10 + 'FOR GOAL REACHING TASK') +
          '\n\t' + ' ' * 7 + '(SINGLE MAP-ELITES VARIANT)')
    print('# ' + '=' * bar_num + ' #\n')
    print('# ' + '=' * bar_num + ' #')
    print('\t' + ' ' * 8 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '=' * bar_num + ' #')
    print_dict(config, list_on_levels=False)
    print('# ' + '=' * bar_num + ' #')


def store_checkpoint(checkpoint, filename):
    with open(filename, 'wb') as cp_file:
        pickle.dump(checkpoint, cp_file)
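
# Usage sketch (hypothetical path and payload): checkpoints are plain pickled
# objects, so they can be restored with a symmetric pickle.load.
if __name__ == '__main__':
    demo_ckpt = {'generation': 10, 'population': []}
    store_checkpoint(demo_ckpt, '/tmp/demo_checkpoint.pkl')
    with open('/tmp/demo_checkpoint.pkl', 'rb') as cp_in:
        assert pickle.load(cp_in) == demo_ckpt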

def record_info(logbook, stats, gen, pop, inv_ind):
    if stats is not None:
        record = stats.compile(pop)
        logbook.record(gen=gen, nevals=len(inv_ind), **record)


def record_population(num_sims, population, file, skips, pbar=None, verbose=False, coev=False):
    fitness_values = np.asarray(list(map(lambda i: i.fitness.values, population)))
    if coev:
        fitness_values = [1.0 / f for f in fitness_values]
    pop_stats = {
        'num_sims': num_sims,
        'avg_fitness': np.mean(fitness_values),
        'std_dev': np.std(fitness_values),
        'min': np.min(fitness_values),
        'max': np.max(fitness_values)
    }
    # store the current population values
    file.write(json.dumps({
        **pop_stats,
        'population': [individual.info(coev=coev) for individual in population]
    }))
    if verbose:
        if pbar is not None:
            pbar.set_postfix({
                'avg': pop_stats['avg_fitness'],
                'std': pop_stats['std_dev'],
                'min': pop_stats['min'],
                'max': pop_stats['max'],
                'skip': skips
            })
        else:
            print('num_sims: {} | Fitness -> avg: {} std: {} min: {} max: {}'.format(
                pop_stats['num_sims'],
                pop_stats['avg_fitness'],
                pop_stats['std_dev'],
                pop_stats['min'],
                pop_stats['max']
            ))


def evaluate_ind(toolbox, individuals, glob_history, local_history=None, coev=False, eval_all=False):
    """
    :param toolbox:
    :param individuals:
    :param glob_history: a dictionary that maps simulator input strings
        into corresponding computed fitness.
    :param local_history:
    :param coev:
    :param eval_all:
    :return:
    """
    # consider only new individuals (offsprings)
    invalid_ind = [ind for ind in individuals if (not ind.fitness.valid or (coev and eval_all))]

    # select for evaluation only solutions
    # that have not been already evaluated
    skip = []
    to_evaluate = []
    for ind in invalid_ind:
        ind_string = ind.string_input()
        if not coev and ind_string in glob_history:
            # assign fitness previously computed for the same configuration
            ind.fitness.values = glob_history[ind_string]
            # store also record into a local history
            # to support MAP creation functionality
            if local_history is not None:
                local_history[ind_string] = glob_history[ind_string]
            skip.append(ind)
        else:
            to_evaluate.append(ind)

    n_evaluations = 0
    if len(to_evaluate) > 0:
        fitnesses = toolbox.map(toolbox.evaluate, to_evaluate)
        n_evaluations = len(fitnesses)
        for ind, fit in zip(to_evaluate, fitnesses):
            ind.fitness.values = fit
            ind_string = ind.string_input()
            # update history records with latest fitness
            glob_history[ind_string] = fit
            # store also record into a local history
            # to support MAP creation functionality
            if local_history is not None:
                local_history[ind_string] = fit

    if not coev:
        n_evaluations = len(individuals)
    return to_evaluate + skip, len(skip), n_evaluations
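
# Illustration (example values, not from the original repo) of the history
# layout evaluate_ind relies on: simulator input strings map to 1-element
# fitness tuples, matching DEAP's convention that fitness values are tuples
# and the (fitness,) tuples produced by load_history further below.
_EXAMPLE_HISTORY = {
    '0-1-2-0.5-1.0-0.0-0.0-1.0--': (12.3,),
}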
In case no db is given, returns an empty history", "list_on_levels=False): for k, v in sorted(d.items()): if isinstance(v, dict): print('\\t'*level + k+':') print_dict(v,", "is not None: record = stats.compile(pop) if stats is not None else {}", "a local history to support MAP creation functionality if local_history is not None:", "+ ' #') print('\\t\\tCO-EVOLUTIONARY SIMULATION\\n\\t' + ' ' * 7 + 'OF MORPHOLOGY", "individuals, glob_history, local_history=None, coev=False, eval_all=False): \"\"\" :param toolbox: :param individuals: :param glob_history: a", "TASK') + '\\n\\t' + ' ' * 7 + '(DOUBLE MAP-ELITES VARIANT)' )", "the plots :param seed: simulation seed :param num_sim: simulation id :param normalize: whether", "print_dict(d, level=0, list_on_levels=False): for k, v in sorted(d.items()): if isinstance(v, dict): print('\\t'*level +", "returns an empty history :param history_file: :return: \"\"\" history = {} if history_file", "to store the plots :param seed: simulation seed :param num_sim: simulation id :param", "print('\\t\\tEVOLUTIONARY SIMULATION\\n\\t' + ' OF TENSEGRITY SOFT ROBOTS CONTROLLERS \\n\\t' + ('FOR GOAL", "the graph os.makedirs(os.path.dirname(conf['out_file']), exist_ok=True) num_colors = len(conf['bins']) if isinstance(conf['bins'], list) else conf['bins'] colors", "11), 'out_file': os.path.join(results_folder, 'n_modules', 'num_mods_dist_sim_{}_{}.pdf'.format(seed, num_sim)), 'bins': N_MODULES, 'discrete_hist': True, 'norm': normalize },", "+ ' #') print_dict(config, list_on_levels=True) print('# ' + '=' * bar_num + '", "num_sim: simulation id :param normalize: whether to reports the plots in a normalized", "* bar_num + ' #') print('\\t' + ' ' * 7 + 'EXPERIMENT", "= list(zip(*[(k, v) for k, v in sorted(inds_vals.items(), key=lambda r: float(r[0]))])) if conf['norm']:", "in conf['bins']} for ind_stiff in conf['data']: inds_vals[str(ind_stiff)] += 1 x, y = list(zip(*[(k,", "if history_file is not None and os.path.exists(history_file): with open(history_file) as history_in: # skip", "if isinstance(v, dict): print('\\t'*level + k+':') print_dict(v, level+1, list_on_levels) elif isinstance(v, list) and", "def record_info(logbook, stats, gen, pop, inv_ind): if stats is not None: record =", "glob_history: a dictionary that maps simulator input strings into corresponding computed fitness. :param", "bar_num=50): print('# ' + '=' * bar_num + ' #') print('\\t\\tEVOLUTIONARY SIMULATION\\n\\t' +", "(' ' * 2 + 'FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR'", "np.mean(fitness_values), 'std_dev': np.std(fitness_values), 'min': np.min(fitness_values), 'max': np.max(fitness_values) } # store the current population", "and list_on_levels and k == 'modules_conf': print('\\t' * level + '{}: ['.format(k)) for", "not used. def load_history_db(history_db): \"\"\" Load simulation history from provided db. In case", "ind.stiffness) for ind in pop])) # plot the properties distribution of last generation", "open(filename, 'wb') as cp_file: pickle.dump(checkpoint, cp_file) def record_info(logbook, stats, gen, pop, inv_ind): if", "fit[0])) # NOTE: these functions below are currently not used. 
import os
import json
import pickle
import sqlite3

import numpy as np
import matplotlib
# select matplotlib backend
matplotlib.use('pdf')
import matplotlib.pyplot as plt

from params_conf import N_MODULES, STIFF_TABLE


# ================================== #
#          Utils functions           #
# ================================== #
def print_dict(d, level=0, list_on_levels=False):
    for k, v in sorted(d.items()):
        if isinstance(v, dict):
            print('\t'*level + k + ':')
            print_dict(v, level+1, list_on_levels)
        elif isinstance(v, list) and list_on_levels and k == 'modules_conf':
            print('\t' * level + '{}: ['.format(k))
            for element in v:
                print('\t' * (level+1) + '{}'.format(element))
            print('\t' * level + ']')
        else:
            print('\t'*level + '{}: {}'.format(k, v))


def print_header(config, bar_num=50):
    print('# ' + '='*bar_num + ' #')
    print('\t\tEVOLUTIONARY SIMULATION\n\t' + ' '*7 + 'OF TENSEGRITY SOFT ROBOTS')
    print('# ' + '='*bar_num + ' #\n')
    print('# ' + '='*bar_num + ' #')
    print('\t' + ' '*7 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '='*bar_num + ' #')
    print_dict(config)
    print('# ' + '='*bar_num + ' #')


def print_header_contr_evo(config, bar_num=50):
    print('# ' + '=' * bar_num + ' #')
    print('\t\tEVOLUTIONARY SIMULATION\n\t' + ' OF TENSEGRITY SOFT ROBOTS CONTROLLERS \n\t' +
          ('FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
           else ' ' * 8 + 'FOR GOAL REACHING TASK'))
    print('# ' + '=' * bar_num + ' #\n')
    print('# ' + '=' * bar_num + ' #')
    print('\t' + ' ' * 7 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '=' * bar_num + ' #')
    print_dict(config, list_on_levels=True)
    print('# ' + '=' * bar_num + ' #')


def print_header_coev(config, bar_num=55):
    print('# ' + '=' * bar_num + ' #')
    print('\t\tCO-EVOLUTIONARY SIMULATION\n\t' + ' ' * 7 + 'OF MORPHOLOGY AND CONTROLLER\n\t' +
          ' ' * 8 + 'OF TENSEGRITY SOFT ROBOTS\n\t' +
          (' ' * 2 + 'FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
           else ' ' * 10 + 'FOR GOAL REACHING TASK'))
    print('# ' + '=' * bar_num + ' #\n')
    print('# ' + '=' * bar_num + ' #')
    print('\t' + ' ' * 8 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '=' * bar_num + ' #')
    print_dict(config, list_on_levels=True)
    print('# ' + '=' * bar_num + ' #')


def print_header_single_map(config, bar_num=55):
    print('# ' + '=' * bar_num + ' #')
    print('\t\t EVOLUTIONARY SIMULATION\n\t' + ' ' * 7 + 'OF MORPHOLOGY AND CONTROLLER\n\t' +
          ' ' * 8 + 'OF TENSEGRITY SOFT ROBOTS\n\t' +
          (' ' * 2 + 'FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
           else ' ' * 10 + 'FOR GOAL REACHING TASK') +
          '\n\t' + ' ' * 7 + '(SINGLE MAP-ELITES VARIANT)')
    print('# ' + '=' * bar_num + ' #\n')
    print('# ' + '=' * bar_num + ' #')
    print('\t' + ' ' * 7 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '=' * bar_num + ' #')
    print_dict(config, list_on_levels=True)
    print('# ' + '=' * bar_num + ' #')


def print_header_double_map(config, bar_num=55):
    print('# ' + '=' * bar_num + ' #')
    print('\t\t EVOLUTIONARY SIMULATION\n\t' + ' ' * 7 + 'OF MORPHOLOGY AND CONTROLLER\n\t' +
          ' ' * 8 + 'OF TENSEGRITY SOFT ROBOTS\n\t' +
          (' ' * 2 + 'FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
           else ' ' * 10 + 'FOR GOAL REACHING TASK') +
          '\n\t' + ' ' * 7 + '(DOUBLE MAP-ELITES VARIANT)')
    print('# ' + '=' * bar_num + ' #\n')
    print('# ' + '=' * bar_num + ' #')
    print('\t' + ' ' * 7 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '=' * bar_num + ' #')
    print_dict(config, list_on_levels=False)
    print('# ' + '=' * bar_num + ' #')
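

# --- Illustrative example (not part of the original module) ------------- #
# A minimal sketch of how print_dict renders a nested experiment config;
# the config keys used here are hypothetical.
def _demo_print_dict():
    config = {
        'seed': 7,
        'ga': {'pop_size': 50, 'generations': 100},
        'modules_conf': ['module-a', 'module-b'],
    }
    # prints one tab per nesting level; with list_on_levels=True the
    # 'modules_conf' list is expanded one item per line between brackets
    print_dict(config, list_on_levels=True)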
"np.min(fitness_values), 'max': np.max(fitness_values) } # store the current population values file.write(json.dumps({ **pop_stats, 'population':", "* (level+1) + '{}'.format(element)) print('\\t' * level + ']') else: print('\\t'*level + '{}:", "(coev and eval_all))] # select for evaluation only solutions # that have not", "fitness. :param local_history: :param coev: :param eval_all: :return: \"\"\" # consider only new", "def evaluate_ind(toolbox, individuals, glob_history, local_history=None, coev=False, eval_all=False): \"\"\" :param toolbox: :param individuals: :param", "print('# ' + '=' * bar_num + ' #') print('\\t' + ' '", "KEY, fitness REAL NOT NULL)''') cursor.executemany('''REPLACE INTO history(robot_string, fitness) VALUES (?)''', history_gen()) cursor.close()", "'{}: {}'.format(k, v)) def print_header(config, bar_num=50): print('# ' + '='*bar_num + ' #')", "os.path.exists(history_file): with open(history_file) as history_in: # skip header history_in.readline() for line in history_in:", "15 matplotlib.rcParams['figure.dpi'] = 300 # create potential needed directory where to store the", "bar_num + ' #') print_dict(config, list_on_levels=False) print('# ' + '=' * bar_num +", "load_history_db(history_db): \"\"\" Load simulation history from provided db. In case no db is", "+ '(DOUBLE MAP-ELITES VARIANT)' ) print('# ' + '=' * bar_num + '", "+ ' #') def print_header_double_map(config, bar_num=55): print('# ' + '=' * bar_num +", "in config['simulation_path'] else ' ' * 10 + 'FOR GOAL REACHING TASK') +", "TASK' if 'SGR' in config['simulation_path'] else ' ' * 8 + 'FOR GOAL", "{ 'num_sims': num_sims, 'avg_fitness': np.mean(fitness_values), 'std_dev': np.std(fitness_values), 'min': np.min(fitness_values), 'max': np.max(fitness_values) } #", "f in fitness_values] pop_stats = { 'num_sims': num_sims, 'avg_fitness': np.mean(fitness_values), 'std_dev': np.std(fitness_values), 'min':", ") print('# ' + '=' * bar_num + ' #\\n') print('# ' +", "if conf['discrete_hist']: inds_vals = {str(sv): 0 for sv in conf['bins']} for ind_stiff in", "level=0, list_on_levels=False): for k, v in sorted(d.items()): if isinstance(v, dict): print('\\t'*level + k+':')", "history_db): \"\"\" Store simulation history in provided db :param history: :param history_db: :return:", "ind in individuals if (not ind.fitness.valid or (coev and eval_all))] # select for", "#') def print_header_double_map(config, bar_num=55): print('# ' + '=' * bar_num + ' #')", "fig.gca() if conf['discrete_hist']: inds_vals = {str(sv): 0 for sv in conf['bins']} for ind_stiff", "+ '=' * bar_num + ' #') print('\\t' + ' ' * 8", ":param conf: :return: \"\"\" font = {'family': 'Source Sans Pro', 'size': 12, 'weight':", "+ '{}: ['.format(k)) for element in v: print('\\t' * (level+1) + '{}'.format(element)) print('\\t'", "+ ' #') print_dict(config) print('# ' + '='*bar_num + ' #') def print_header_contr_evo(config,", "isinstance(v, list) and list_on_levels and k == 'modules_conf': print('\\t' * level + '{}:", "+ '='*bar_num + ' #\\n') print('# ' + '='*bar_num + ' #') print('\\t'", "level + '{}: ['.format(k)) for element in v: print('\\t' * (level+1) + '{}'.format(element))", "TENSEGRITY SOFT ROBOTS CONTROLLERS \\n\\t' + ('FOR GOAL REACHING AFTER SQUEEZING TASK' if", "print('\\t' * (level+1) + '{}'.format(element)) print('\\t' * level + ']') else: print('\\t'*level +", "-> avg: {} std: {} min: {} max: {}'.format( pop_stats['num_sims'], pop_stats['avg_fitness'], pop_stats['std_dev'], pop_stats['min'],", "history from provided db. 
In case no db is given, returns an empty", "' ' * 8 + 'FOR GOAL REACHING TASK') ) print('# ' +", "of robot individuals :param results_folder: folder where to store the plots :param seed:", "os import json import pickle import sqlite3 import numpy as np import matplotlib", "'bins': N_MODULES, 'discrete_hist': True, 'norm': normalize }, { 'data': stiffs, 'title': 'Stiffness', 'out_file':", "for i, (c, p) in enumerate(zip(bins, patches)): plt.setp(p, 'facecolor', colors[i]) ax.set_title('{} distribution across", "'connectedFaces': int(params[2].strip()), 'freq': float(params[3].strip()), 'amplitude': float(params[4].strip()), 'phase': float(params[5].strip()), 'rot': float(params[6].strip()), 'stiff': float(params[7].strip()) }", "ind in invalid_ind: ind_string = ind.string_input() if not coev and ind_string in glob_history:", "where to store the plots :param seed: simulation seed :param num_sim: simulation id", "else {} logbook.record(gen=gen, nevals=len(inv_ind), **record) def record_population(num_sims, population, file, skips, pbar=None, verbose=False, coev=False):", "i.fitness.values, population))) if coev: fitness_values = [1.0 / f for f in fitness_values]", "properties into three lists fits, n_mods, stiffs = list(zip(*[(ind.get_fitness(), ind.num_modules, ind.stiffness) for ind", "list) and list_on_levels and k == 'modules_conf': print('\\t' * level + '{}: ['.format(k))", "fit # store also record into a local history to support MAP creation", "'{}'.format(element)) print('\\t' * level + ']') else: print('\\t'*level + '{}: {}'.format(k, v)) def", "evaluation only solutions # that have not been already evaluated skip = []", "local_history=None, coev=False, eval_all=False): \"\"\" :param toolbox: :param individuals: :param glob_history: a dictionary that", "simulation seed :param num_sim: simulation id :param normalize: whether to reports the plots", "'modules_conf': print('\\t' * level + '{}: ['.format(k)) for element in v: print('\\t' *", "in history.items(): out_file.write('{},{}\\n'.format(rob_string.strip(), fit[0])) # NOTE: these functions below are currently not used.", "REACHING TASK') ) print('# ' + '=' * bar_num + ' #\\n') print('#", ":param normalize: whether to reports the plots in a normalized manner (default: False)", "'fit_dist_sim_{}_{}.pdf'.format(seed, num_sim)), 'bins': max(len(fits)//4, 1), 'discrete_hist': False, 'norm': normalize }, { 'data': n_mods,", "'norm': normalize } ] for conf in configs: plot_population_dist(conf) def plot_population_dist(conf): \"\"\" Plot", "}, { 'data': stiffs, 'title': 'Stiffness', 'out_file': os.path.join(results_folder, 'stiffness', 'stiff_dist_sim_{}_{}.pdf'.format(seed, num_sim)), 'bins': STIFF_TABLE,", "+ ' #') print('\\t' + ' ' * 7 + 'EXPERIMENT CONFIGURATION') print('#", "+ '=' * bar_num + ' #') print('\\t\\tCO-EVOLUTIONARY SIMULATION\\n\\t' + ' ' *", "skip, len(skip), n_evaluations def plot_population_stats(pop, results_folder, seed, num_sim, normalize=False): \"\"\" Plot the fitness/num_modules/stiffness", "CONFIGURATION') print('# ' + '='*bar_num + ' #') print_dict(config) print('# ' + '='*bar_num", "CONTROLLERS \\n\\t' + ('FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']", "density=conf['norm'], range=conf['h_range']) ax.set_xticks(bins) for i, (c, p) in enumerate(zip(bins, patches)): plt.setp(p, 'facecolor', colors[i])", "'SGR' in config['simulation_path'] else ' ' * 8 + 'FOR GOAL REACHING TASK')", "= glob_history[ind_string] skip.append(ind) else: to_evaluate.append(ind) n_evaluations = 0 if 
len(to_evaluate) > 0: fitnesses", "'Fitness', 'h_range': (0, np.max(fits)), 'out_file': os.path.join(results_folder, 'fitness', 'fit_dist_sim_{}_{}.pdf'.format(seed, num_sim)), 'bins': max(len(fits)//4, 1), 'discrete_hist':", "0 if len(to_evaluate) > 0: fitnesses = toolbox.map(toolbox.evaluate, to_evaluate) n_evaluations = len(fitnesses) for", "False, 'norm': normalize }, { 'data': n_mods, 'title': '# modules', 'h_range': (2, 11),", "import matplotlib.pyplot as plt from params_conf import N_MODULES, STIFF_TABLE # ================================== # #", "rob_string, fit in history.items(): out_file.write('{},{}\\n'.format(rob_string.strip(), fit[0])) # NOTE: these functions below are currently", "CONFIGURATION') print('# ' + '=' * bar_num + ' #') print_dict(config, list_on_levels=False) print('#", "print('# ' + '=' * bar_num + ' #') print_dict(config, list_on_levels=False) print('# '", "across last generation'.format(conf['title']), fontweight='normal') ax.set_xlabel(conf['title']) ax.set_ylabel('# Individuals') if conf['norm']: ax.set_ylim(0, 1.1) plt.savefig(conf['out_file'], bbox_inches='tight')", "configuration ind.fitness.values = glob_history[ind_string] # store also record into a local history to", "{} logbook.record(gen=gen, nevals=len(inv_ind), **record) def record_population(num_sims, population, file, skips, pbar=None, verbose=False, coev=False): fitness_values", "individuals if (not ind.fitness.valid or (coev and eval_all))] # select for evaluation only", "* bar_num + ' #') print('\\t' + ' ' * 8 + 'EXPERIMENT", "print('# ' + '='*bar_num + ' #') def print_header_contr_evo(config, bar_num=50): print('# ' +", "import os import json import pickle import sqlite3 import numpy as np import", "in conf['data']: inds_vals[str(ind_stiff)] += 1 x, y = list(zip(*[(k, v) for k, v", "#') def print_header_contr_evo(config, bar_num=50): print('# ' + '=' * bar_num + ' #')", "{'family': 'Source Sans Pro', 'size': 12, 'weight': 'light'} matplotlib.rc('font', **font) matplotlib.rcParams[\"axes.titlepad\"] = 15", "functionality if local_history is not None: local_history[ind_string] = fit if not coev: n_evaluations", "'out_file': os.path.join(results_folder, 'fitness', 'fit_dist_sim_{}_{}.pdf'.format(seed, num_sim)), 'bins': max(len(fits)//4, 1), 'discrete_hist': False, 'norm': normalize },", "gen, pop, inv_ind): if stats is not None: record = stats.compile(pop) if stats", "graph os.makedirs(os.path.dirname(conf['out_file']), exist_ok=True) num_colors = len(conf['bins']) if isinstance(conf['bins'], list) else conf['bins'] colors =", "# that have not been already evaluated skip = [] to_evaluate = []", "+ '=' * bar_num + ' #') print_dict(config, list_on_levels=False) print('# ' + '='", "' + '=' * bar_num + ' #') def store_checkpoint(checkpoint, filename): with open(filename,", "history_db: :return: \"\"\" history = {} if history_db is not None: conn =", "fitness_values = [1.0 / f for f in fitness_values] pop_stats = { 'num_sims':", "in provided db :param history: :param history_db: :return: \"\"\" def history_gen(): for record", "i, (c, p) in enumerate(zip(bins, patches)): plt.setp(p, 'facecolor', colors[i]) ax.set_title('{} distribution across last", "= len(conf['bins']) if isinstance(conf['bins'], list) else conf['bins'] colors = plt.cm.viridis(np.linspace(0, 1, num_colors)) fig", "'out_file': os.path.join(results_folder, 'n_modules', 'num_mods_dist_sim_{}_{}.pdf'.format(seed, num_sim)), 'bins': N_MODULES, 'discrete_hist': True, 'norm': normalize }, {", "' + '=' * bar_num + ' #') print('\\t\\t 
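

# --- Illustrative example (not part of the original module) ------------- #
# A minimal sketch of a checkpoint round-trip through store_checkpoint;
# the checkpoint keys and file name below are hypothetical.
def _demo_checkpoint_roundtrip(population, generation, logbook, rnd_state):
    checkpoint = {
        'population': population,
        'generation': generation,
        'logbook': logbook,
        'rnd_state': rnd_state,
    }
    filename = 'checkpoint_gen_{}.pkl'.format(generation)
    store_checkpoint(checkpoint, filename)
    # loading mirrors store_checkpoint: pickle.load on the same file
    with open(filename, 'rb') as cp_file:
        return pickle.load(cp_file)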


def evaluate_ind(toolbox, individuals, glob_history, local_history=None, coev=False, eval_all=False):
    """
    Evaluate the given individuals, reusing cached fitness values where possible.
    :param toolbox: DEAP-style toolbox exposing map and evaluate
    :param individuals: the individuals to process
    :param glob_history: a dictionary that maps simulator input strings into
        the corresponding computed fitness values
    :param local_history: optional dictionary collecting the same records to
        support MAP creation
    :param coev: whether the evaluation runs in the co-evolutionary setting
    :param eval_all: whether to re-evaluate all individuals (co-evolution only)
    :return: the processed individuals, the number of skipped evaluations and
        the number of performed evaluations
    """
    # consider only new individuals (offsprings)
    invalid_ind = [ind for ind in individuals if (not ind.fitness.valid or (coev and eval_all))]
    # select for evaluation only solutions
    # that have not been already evaluated
    skip = []
    to_evaluate = []
    for ind in invalid_ind:
        ind_string = ind.string_input()
        if not coev and ind_string in glob_history:
            # assign fitness previously computed for the same configuration
            ind.fitness.values = glob_history[ind_string]
            # store also record into a local history to support MAP creation functionality
            if local_history is not None:
                local_history[ind_string] = glob_history[ind_string]
            skip.append(ind)
        else:
            to_evaluate.append(ind)
    n_evaluations = 0
    if len(to_evaluate) > 0:
        fitnesses = toolbox.map(toolbox.evaluate, to_evaluate)
        n_evaluations = len(fitnesses)
        for ind, fit in zip(to_evaluate, fitnesses):
            ind.fitness.values = fit
            ind_string = ind.string_input()
            # update history records with latest fitness
            glob_history[ind_string] = fit
            # store also record into a local history to support MAP creation functionality
            if local_history is not None:
                local_history[ind_string] = fit
    if not coev:
        n_evaluations = len(individuals)
    return to_evaluate + skip, len(skip), n_evaluations
def", "coev and ind_string in glob_history: # assign fitness previously computed for the same", "print('# ' + '=' * bar_num + ' #') print('\\t\\tEVOLUTIONARY SIMULATION\\n\\t' + '", "'=' * bar_num + ' #') print('\\t\\t EVOLUTIONARY SIMULATION\\n\\t' + ' ' *", "* bar_num + ' #') def store_checkpoint(checkpoint, filename): with open(filename, 'wb') as cp_file:", "list(zip(*[(k, v) for k, v in sorted(inds_vals.items(), key=lambda r: float(r[0]))])) if conf['norm']: y", "simulation history in provided file :param history: :param history_file: :return: \"\"\" with open(history_file,", "in population] })) if verbose: if pbar is not None: pbar.set_postfix({ 'avg': pop_stats['avg_fitness'],", "Load simulation history from provided file. In case no file is given, returns", "bar_num + ' #') def print_header_single_map(config, bar_num=55): print('# ' + '=' * bar_num", "for k, v in sorted(d.items()): if isinstance(v, dict): print('\\t'*level + k+':') print_dict(v, level+1,", "'min': np.min(fitness_values), 'max': np.max(fitness_values) } # store the current population values file.write(json.dumps({ **pop_stats,", "= fit if not coev: n_evaluations = len(individuals) return to_evaluate + skip, len(skip),", "print('# ' + '='*bar_num + ' #') print_dict(config) print('# ' + '='*bar_num +", "history_gen(): for record in history.items(): yield record to_init = not os.path.exists(history_db) conn =", "\"\"\" Load simulation history from provided db. In case no db is given,", "= 0 if len(to_evaluate) > 0: fitnesses = toolbox.map(toolbox.evaluate, to_evaluate) n_evaluations = len(fitnesses)", "ind.fitness.valid or (coev and eval_all))] # select for evaluation only solutions # that", "eval_all: :return: \"\"\" # consider only new individuals (offsprings) invalid_ind = [ind for", "'OF TENSEGRITY SOFT ROBOTS\\n\\t' + (' ' * 2 + 'FOR GOAL REACHING", "if (not ind.fitness.valid or (coev and eval_all))] # select for evaluation only solutions", "'*7 + 'EXPERIMENT CONFIGURATION') print('# ' + '='*bar_num + ' #') print_dict(config) print('#", "coev=False): fitness_values = np.asarray(list(map(lambda i: i.fitness.values, population))) if coev: fitness_values = [1.0 /", "(0, np.max(fits)), 'out_file': os.path.join(results_folder, 'fitness', 'fit_dist_sim_{}_{}.pdf'.format(seed, num_sim)), 'bins': max(len(fits)//4, 1), 'discrete_hist': False, 'norm':", "+ '=' * bar_num + ' #') def print_header_single_map(config, bar_num=55): print('# ' +", "select for evaluation only solutions # that have not been already evaluated skip", "'EXPERIMENT CONFIGURATION') print('# ' + '=' * bar_num + ' #') print_dict(config, list_on_levels=False)", "def store_history_db(history, history_db): \"\"\" Store simulation history in provided db :param history: :param", "pbar.set_postfix({ 'avg': pop_stats['avg_fitness'], 'std': pop_stats['std_dev'], 'min': pop_stats['min'], 'max': pop_stats['max'], 'skip': skips }) else:", "SOFT ROBOTS\\n\\t' + (' ' * 2 + 'FOR GOAL REACHING AFTER SQUEEZING", "False) :return: \"\"\" # split robots interested properties into three lists fits, n_mods,", "n_evaluations = 0 if len(to_evaluate) > 0: fitnesses = toolbox.map(toolbox.evaluate, to_evaluate) n_evaluations =", "std: {} min: {} max: {}'.format( pop_stats['num_sims'], pop_stats['avg_fitness'], pop_stats['std_dev'], pop_stats['min'], pop_stats['max'] )) def", "= plt.cm.viridis(np.linspace(0, 1, num_colors)) fig = plt.figure(figsize=(12, 5)) ax = fig.gca() if conf['discrete_hist']:", "num_sim, normalize=False): \"\"\" Plot the fitness/num_modules/stiffness 
distribution across given robot population :param pop:", "matplotlib # select matplotlib backend matplotlib.use('pdf') import matplotlib.pyplot as plt from params_conf import", "})) if verbose: if pbar is not None: pbar.set_postfix({ 'avg': pop_stats['avg_fitness'], 'std': pop_stats['std_dev'],", "results_folder: folder where to store the plots :param seed: simulation seed :param num_sim:", "stiffs, 'title': 'Stiffness', 'out_file': os.path.join(results_folder, 'stiffness', 'stiff_dist_sim_{}_{}.pdf'.format(seed, num_sim)), 'bins': STIFF_TABLE, 'discrete_hist': True, 'norm':", "\"\"\" Store simulation history in provided file :param history: :param history_file: :return: \"\"\"", "to reports the plots in a normalized manner (default: False) :return: \"\"\" #", "as plt from params_conf import N_MODULES, STIFF_TABLE # ================================== # # Utils functions", "\"\"\" with open(history_file, 'w') as out_file: out_file.write('rob_string,fitness\\n') for rob_string, fit in history.items(): out_file.write('{},{}\\n'.format(rob_string.strip(),", "level + ']') else: print('\\t'*level + '{}: {}'.format(k, v)) def print_header(config, bar_num=50): print('#", "cursor = conn.cursor() # create the if to_init: cursor.execute('''CREATE TABLE history(robot_string VARCHAR PRIMARY", "config['simulation_path'] else ' ' * 10 + 'FOR GOAL REACHING TASK') + '\\n\\t'", "new individuals (offsprings) invalid_ind = [ind for ind in individuals if (not ind.fitness.valid", "#') print('\\t\\tEVOLUTIONARY SIMULATION\\n\\t' + ' '*7 + 'OF TENSEGRITY SOFT ROBOTS') print('# '", "' #') def store_checkpoint(checkpoint, filename): with open(filename, 'wb') as cp_file: pickle.dump(checkpoint, cp_file) def", "\"\"\" font = {'family': 'Source Sans Pro', 'size': 12, 'weight': 'light'} matplotlib.rc('font', **font)", "' #') def print_header_single_map(config, bar_num=55): print('# ' + '=' * bar_num + '", "property across :param conf: :return: \"\"\" font = {'family': 'Source Sans Pro', 'size':", "1), 'discrete_hist': False, 'norm': normalize }, { 'data': n_mods, 'title': '# modules', 'h_range':", "' #') print_dict(config, list_on_levels=True) print('# ' + '=' * bar_num + ' #')", ":param history: :param history_file: :return: \"\"\" with open(history_file, 'w') as out_file: out_file.write('rob_string,fitness\\n') for", "= ind.string_input() # update history records with latest fitness glob_history[ind_string] = fit #", "fitnesses = toolbox.map(toolbox.evaluate, to_evaluate) n_evaluations = len(fitnesses) for ind, fit in zip(to_evaluate, fitnesses):", ":return: \"\"\" history = {} if history_db is not None: conn = sqlite3.connect(history_db)", "if not coev: n_evaluations = len(individuals) return to_evaluate + skip, len(skip), n_evaluations def", "no file is given, returns an empty history :param history_file: :return: \"\"\" history", "[] for ind in invalid_ind: ind_string = ind.string_input() if not coev and ind_string", "pop_stats['num_sims'], pop_stats['avg_fitness'], pop_stats['std_dev'], pop_stats['min'], pop_stats['max'] )) def evaluate_ind(toolbox, individuals, glob_history, local_history=None, coev=False, eval_all=False):", "cursor.execute('SELECT * FROM history'): history[robot_string] = (float(fitness),) cursor.close() conn.close() return history def store_history_db(history,", "ind_string = ind.string_input() # update history records with latest fitness glob_history[ind_string] = fit", "'=' * bar_num + ' #') print('\\t' + ' ' * 8 +", "local_history: :param coev: :param eval_all: :return: \"\"\" # consider 
only new individuals (offsprings)", "None and os.path.exists(history_file): with open(history_file) as history_in: # skip header history_in.readline() for line", "\"\"\" history = {} if history_file is not None and os.path.exists(history_file): with open(history_file)", "functionality if local_history is not None: local_history[ind_string] = glob_history[ind_string] skip.append(ind) else: to_evaluate.append(ind) n_evaluations", "robot individuals :param results_folder: folder where to store the plots :param seed: simulation", "inds_vals = {str(sv): 0 for sv in conf['bins']} for ind_stiff in conf['data']: inds_vals[str(ind_stiff)]", "= { 'order': int(params[0].strip()), 'connectedModules': int(params[1].strip()), 'connectedFaces': int(params[2].strip()), 'freq': float(params[3].strip()), 'amplitude': float(params[4].strip()), 'phase':", "not coev and ind_string in glob_history: # assign fitness previously computed for the", "params_conf import N_MODULES, STIFF_TABLE # ================================== # # Utils functions # # ==================================", "history_in: robot_string, fitness = line.strip().split(',') history[robot_string.strip()] = (float(fitness),) return history def store_history(history, history_file):", "STIFF_TABLE # ================================== # # Utils functions # # ================================== # def print_dict(d,", "an empty history :param history_file: :return: \"\"\" history = {} if history_file is", "def store_checkpoint(checkpoint, filename): with open(filename, 'wb') as cp_file: pickle.dump(checkpoint, cp_file) def record_info(logbook, stats,", ":param coev: :param eval_all: :return: \"\"\" # consider only new individuals (offsprings) invalid_ind", "plt.figure(figsize=(12, 5)) ax = fig.gca() if conf['discrete_hist']: inds_vals = {str(sv): 0 for sv", "population :param pop: population of robot individuals :param results_folder: folder where to store", "[ind for ind in individuals if (not ind.fitness.valid or (coev and eval_all))] #", "2 + 'FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path'] else", "MORPHOLOGY AND CONTROLLER\\n\\t' + ' ' * 8 + 'OF TENSEGRITY SOFT ROBOTS\\n\\t'", "to_evaluate = [] for ind in invalid_ind: ind_string = ind.string_input() if not coev", "' + '='*bar_num + ' #') print('\\t' + ' '*7 + 'EXPERIMENT CONFIGURATION')", "sqlite3 import numpy as np import matplotlib # select matplotlib backend matplotlib.use('pdf') import", "if conf['norm']: y = np.asarray(y)/np.sum(y) ax.bar(x, y, color=colors) else: _, bins, patches =", "color=colors) else: _, bins, patches = ax.hist(conf['data'], bins=conf['bins'], density=conf['norm'], range=conf['h_range']) ax.set_xticks(bins) for i,", "cp_file) def record_info(logbook, stats, gen, pop, inv_ind): if stats is not None: record", "print('# ' + '=' * bar_num + ' #') print_dict(config, list_on_levels=True) print('# '", "list_on_levels) elif isinstance(v, list) and list_on_levels and k == 'modules_conf': print('\\t' * level", "\"\"\" # consider only new individuals (offsprings) invalid_ind = [ind for ind in", "individuals (offsprings) invalid_ind = [ind for ind in individuals if (not ind.fitness.valid or", "= fit # store also record into a local history to support MAP", "list_on_levels and k == 'modules_conf': print('\\t' * level + '{}: ['.format(k)) for element", "print_dict(config) print('# ' + '='*bar_num + ' #') def print_header_contr_evo(config, bar_num=50): print('# '", "history_file): \"\"\" Store simulation history in provided file :param history: :param 
history_file: :return:", "Utils functions # # ================================== # def print_dict(d, level=0, list_on_levels=False): for k, v", "and ind_string in glob_history: # assign fitness previously computed for the same configuration", "print('\\t' + ' ' * 7 + 'EXPERIMENT CONFIGURATION') print('# ' + '='", ":param pop: population of robot individuals :param results_folder: folder where to store the", "' * 10 + 'FOR GOAL REACHING TASK') + '\\n\\t' + ' '", "for ind in pop])) # plot the properties distribution of last generation configs", "plots in a normalized manner (default: False) :return: \"\"\" # split robots interested", "'std_dev': np.std(fitness_values), 'min': np.min(fitness_values), 'max': np.max(fitness_values) } # store the current population values", "last generation configs = [ { 'data': fits, 'title': 'Fitness', 'h_range': (0, np.max(fits)),", "if coev: fitness_values = [1.0 / f for f in fitness_values] pop_stats =", "7 + '(SINGLE MAP-ELITES VARIANT)' ) print('# ' + '=' * bar_num +", "toolbox: :param individuals: :param glob_history: a dictionary that maps simulator input strings into", "no db is given, returns an empty history :param history_db: :return: \"\"\" history", "as history_in: # skip header history_in.readline() for line in history_in: robot_string, fitness =", "from params_conf import N_MODULES, STIFF_TABLE # ================================== # # Utils functions # #", "ax.hist(conf['data'], bins=conf['bins'], density=conf['norm'], range=conf['h_range']) ax.set_xticks(bins) for i, (c, p) in enumerate(zip(bins, patches)): plt.setp(p,", "AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path'] else ' ' * 10 +", "population, file, skips, pbar=None, verbose=False, coev=False): fitness_values = np.asarray(list(map(lambda i: i.fitness.values, population))) if", "8 + 'EXPERIMENT CONFIGURATION') print('# ' + '=' * bar_num + ' #')", "robot = [] for module_str in rb_string.split('--')[:-1]: params = module_str.split('-') module = {", "'fitness', 'fit_dist_sim_{}_{}.pdf'.format(seed, num_sim)), 'bins': max(len(fits)//4, 1), 'discrete_hist': False, 'norm': normalize }, { 'data':", "'stiff': float(params[7].strip()) } robot.append(module) return robot # ================================== # # SIM HISTORY MANAGEMENT", "' #') print('\\t\\t EVOLUTIONARY SIMULATION\\n\\t' + ' ' * 7 + 'OF MORPHOLOGY", "'connectedModules': int(params[1].strip()), 'connectedFaces': int(params[2].strip()), 'freq': float(params[3].strip()), 'amplitude': float(params[4].strip()), 'phase': float(params[5].strip()), 'rot': float(params[6].strip()), 'stiff':", "in v: print('\\t' * (level+1) + '{}'.format(element)) print('\\t' * level + ']') else:", "= not os.path.exists(history_db) conn = sqlite3.connect(history_db) cursor = conn.cursor() # create the if", "normalize=False): \"\"\" Plot the fitness/num_modules/stiffness distribution across given robot population :param pop: population", "= [ { 'data': fits, 'title': 'Fitness', 'h_range': (0, np.max(fits)), 'out_file': os.path.join(results_folder, 'fitness',", "these functions below are currently not used. 
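

# --- Illustrative example (not part of the original module) ------------- #
# A hand-built conf for plot_population_dist, mirroring the continuous
# (non-discrete) entries assembled in plot_population_stats; the output
# file name is hypothetical.
def _demo_plot_fitness_dist(fits):
    plot_population_dist({
        'data': fits,
        'title': 'Fitness',
        'h_range': (0, max(fits)),
        'out_file': 'out/fitness/fit_dist_demo.pdf',
        'bins': max(len(fits) // 4, 1),   # continuous histogram
        'discrete_hist': False,
        'norm': True,                     # plot fractions instead of counts
    })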


def parse_robot_string(rb_string):
    robot = []
    for module_str in rb_string.split('--')[:-1]:
        params = module_str.split('-')
        module = {
            'order': int(params[0].strip()),
            'connectedModules': int(params[1].strip()),
            'connectedFaces': int(params[2].strip()),
            'freq': float(params[3].strip()),
            'amplitude': float(params[4].strip()),
            'phase': float(params[5].strip()),
            'rot': float(params[6].strip()),
            'stiff': float(params[7].strip())
        }
        robot.append(module)
    return robot
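

# --- Illustrative example (not part of the original module) ------------- #
# Example of the dash-separated encoding parse_robot_string expects: each
# module is order-connectedModules-connectedFaces-freq-amplitude-phase-rot-stiff,
# with modules separated (and terminated) by '--'. This two-module string
# is made up for illustration.
def _demo_parse_robot_string():
    rb_string = '0-0-0-0.5-1.0-0.0-0.0-10.0--1-0-2-0.5-1.0-3.14-0.0-10.0--'
    return parse_robot_string(rb_string)  # -> list of two module dicts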
"skips }) else: print('num_sims: {} | Fitness -> avg: {} std: {} min:", "plt from params_conf import N_MODULES, STIFF_TABLE # ================================== # # Utils functions #", "skip header history_in.readline() for line in history_in: robot_string, fitness = line.strip().split(',') history[robot_string.strip()] =", "{}'.format( pop_stats['num_sims'], pop_stats['avg_fitness'], pop_stats['std_dev'], pop_stats['min'], pop_stats['max'] )) def evaluate_ind(toolbox, individuals, glob_history, local_history=None, coev=False,", "'out_file': os.path.join(results_folder, 'stiffness', 'stiff_dist_sim_{}_{}.pdf'.format(seed, num_sim)), 'bins': STIFF_TABLE, 'discrete_hist': True, 'norm': normalize } ]", "in invalid_ind: ind_string = ind.string_input() if not coev and ind_string in glob_history: #", "def parse_robot_string(rb_string): robot = [] for module_str in rb_string.split('--')[:-1]: params = module_str.split('-') module", "plot the properties distribution of last generation configs = [ { 'data': fits,", "print('\\t' + ' '*7 + 'EXPERIMENT CONFIGURATION') print('# ' + '='*bar_num + '", "' #') print('\\t\\tCO-EVOLUTIONARY SIMULATION\\n\\t' + ' ' * 7 + 'OF MORPHOLOGY AND", "matplotlib.rcParams['figure.dpi'] = 300 # create potential needed directory where to store the graph", "print_dict(config, list_on_levels=True) print('# ' + '=' * bar_num + ' #') def print_header_coev(config,", "out_file.write('rob_string,fitness\\n') for rob_string, fit in history.items(): out_file.write('{},{}\\n'.format(rob_string.strip(), fit[0])) # NOTE: these functions below", "corresponding computed fitness. :param local_history: :param coev: :param eval_all: :return: \"\"\" # consider", "bar_num + ' #\\n') print('# ' + '=' * bar_num + ' #')", "else: print('num_sims: {} | Fitness -> avg: {} std: {} min: {} max:", "v)) def print_header(config, bar_num=50): print('# ' + '='*bar_num + ' #') print('\\t\\tEVOLUTIONARY SIMULATION\\n\\t'", "with open(history_file, 'w') as out_file: out_file.write('rob_string,fitness\\n') for rob_string, fit in history.items(): out_file.write('{},{}\\n'.format(rob_string.strip(), fit[0]))", "def print_header_single_map(config, bar_num=55): print('# ' + '=' * bar_num + ' #') print('\\t\\t", "simulation history in provided db :param history: :param history_db: :return: \"\"\" def history_gen():", "Store simulation history in provided db :param history: :param history_db: :return: \"\"\" def", "input strings into corresponding computed fitness. 
:param local_history: :param coev: :param eval_all: :return:", "not None: pbar.set_postfix({ 'avg': pop_stats['avg_fitness'], 'std': pop_stats['std_dev'], 'min': pop_stats['min'], 'max': pop_stats['max'], 'skip': skips", "distribution across last generation'.format(conf['title']), fontweight='normal') ax.set_xlabel(conf['title']) ax.set_ylabel('# Individuals') if conf['norm']: ax.set_ylim(0, 1.1) plt.savefig(conf['out_file'],", "isinstance(v, dict): print('\\t'*level + k+':') print_dict(v, level+1, list_on_levels) elif isinstance(v, list) and list_on_levels", "i: i.fitness.values, population))) if coev: fitness_values = [1.0 / f for f in", "is not None: pbar.set_postfix({ 'avg': pop_stats['avg_fitness'], 'std': pop_stats['std_dev'], 'min': pop_stats['min'], 'max': pop_stats['max'], 'skip':", "' #') print('\\t\\tEVOLUTIONARY SIMULATION\\n\\t' + ' '*7 + 'OF TENSEGRITY SOFT ROBOTS') print('#", "have not been already evaluated skip = [] to_evaluate = [] for ind", "{ 'order': int(params[0].strip()), 'connectedModules': int(params[1].strip()), 'connectedFaces': int(params[2].strip()), 'freq': float(params[3].strip()), 'amplitude': float(params[4].strip()), 'phase': float(params[5].strip()),", "for ind in individuals if (not ind.fitness.valid or (coev and eval_all))] # select", "' ' * 10 + 'FOR GOAL REACHING TASK') + '\\n\\t' + '", "8 + 'OF TENSEGRITY SOFT ROBOTS\\n\\t' + (' ' * 2 + 'FOR", "to store the graph os.makedirs(os.path.dirname(conf['out_file']), exist_ok=True) num_colors = len(conf['bins']) if isinstance(conf['bins'], list) else", "params = module_str.split('-') module = { 'order': int(params[0].strip()), 'connectedModules': int(params[1].strip()), 'connectedFaces': int(params[2].strip()), 'freq':", "robot.append(module) return robot # ================================== # # SIM HISTORY MANAGEMENT # # ==================================", "np.asarray(y)/np.sum(y) ax.bar(x, y, color=colors) else: _, bins, patches = ax.hist(conf['data'], bins=conf['bins'], density=conf['norm'], range=conf['h_range'])", "as cp_file: pickle.dump(checkpoint, cp_file) def record_info(logbook, stats, gen, pop, inv_ind): if stats is", "if 'SGR' in config['simulation_path'] else ' ' * 8 + 'FOR GOAL REACHING", "given, returns an empty history :param history_file: :return: \"\"\" history = {} if", "'freq': float(params[3].strip()), 'amplitude': float(params[4].strip()), 'phase': float(params[5].strip()), 'rot': float(params[6].strip()), 'stiff': float(params[7].strip()) } robot.append(module) return", "AND CONTROLLER\\n\\t' + ' ' * 8 + 'OF TENSEGRITY SOFT ROBOTS\\n\\t' +", "+ '=' * bar_num + ' #\\n') print('# ' + '=' * bar_num", "print('# ' + '=' * bar_num + ' #\\n') print('# ' + '='", "'num_sims': num_sims, 'avg_fitness': np.mean(fitness_values), 'std_dev': np.std(fitness_values), 'min': np.min(fitness_values), 'max': np.max(fitness_values) } # store", "out_file: out_file.write('rob_string,fitness\\n') for rob_string, fit in history.items(): out_file.write('{},{}\\n'.format(rob_string.strip(), fit[0])) # NOTE: these functions", "conf['data']: inds_vals[str(ind_stiff)] += 1 x, y = list(zip(*[(k, v) for k, v in", "skip.append(ind) else: to_evaluate.append(ind) n_evaluations = 0 if len(to_evaluate) > 0: fitnesses = toolbox.map(toolbox.evaluate,", "* bar_num + ' #') print('\\t\\t EVOLUTIONARY SIMULATION\\n\\t' + ' ' * 7", "pbar=None, verbose=False, coev=False): fitness_values = np.asarray(list(map(lambda i: i.fitness.values, population))) if coev: fitness_values =", "* level + ']') else: print('\\t'*level + '{}: 
<filename>ea_sim/utils.py<gh_stars>1-10
import os
import json
import pickle
import sqlite3

import numpy as np
import matplotlib

# select matplotlib backend
matplotlib.use('pdf')
import matplotlib.pyplot as plt

# the module providing these constants is not named in the recovered source;
# adjust the import to the project's layout
from robot import N_MODULES, STIFF_TABLE


# ================================== #
#          Utils functions           #
# ================================== #

def print_dict(d, level=0, list_on_levels=False):
    for k, v in sorted(d.items()):
        if isinstance(v, dict):
            print('\t'*level + k + ':')
            print_dict(v, level+1, list_on_levels)
        elif isinstance(v, list) and list_on_levels and k == 'modules_conf':
            print('\t' * level + '{}: ['.format(k))
            for element in v:
                print('\t' * (level+1) + '{}'.format(element))
            print('\t' * level + ']')
        else:
            print('\t'*level + '{}: {}'.format(k, v))


def print_header(config, bar_num=50):
    print('# ' + '='*bar_num + ' #')
    print('\t\tEVOLUTIONARY SIMULATION\n\t' + ' '*7 + 'OF TENSEGRITY SOFT ROBOTS')
    print('# ' + '='*bar_num + ' #\n')
    print('# ' + '='*bar_num + ' #')
    print('\t' + ' '*7 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '='*bar_num + ' #')
    print_dict(config)
    print('# ' + '='*bar_num + ' #')


def print_header_contr_evo(config, bar_num=50):
    print('# ' + '=' * bar_num + ' #')
    print('\t\tEVOLUTIONARY SIMULATION\n\t' +
          ' OF TENSEGRITY SOFT ROBOTS CONTROLLERS \n\t' +
          ('FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
           else ' ' * 10 + 'FOR GOAL REACHING TASK'))
    print('# ' + '=' * bar_num + ' #\n')
    print('# ' + '=' * bar_num + ' #')
    print('\t' + ' ' * 7 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '=' * bar_num + ' #')
    print_dict(config, list_on_levels=True)
    print('# ' + '=' * bar_num + ' #')


def print_header_coev(config, bar_num=55):
    print('# ' + '=' * bar_num + ' #')
    print('\t\tCO-EVOLUTIONARY SIMULATION\n\t' +
          ' ' * 7 + 'OF MORPHOLOGY AND CONTROLLER\n\t' +
          ' ' * 8 + 'OF TENSEGRITY SOFT ROBOTS\n\t' +
          (' ' * 2 + 'FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
           else ' ' * 8 + 'FOR GOAL REACHING TASK'))
    print('# ' + '=' * bar_num + ' #\n')
    print('# ' + '=' * bar_num + ' #')
    print('\t' + ' ' * 8 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '=' * bar_num + ' #')
    print_dict(config, list_on_levels=False)
    print('# ' + '=' * bar_num + ' #')


def print_header_single_map(config, bar_num=55):
    print('# ' + '=' * bar_num + ' #')
    print('\t\t EVOLUTIONARY SIMULATION\n\t' +
          ' OF TENSEGRITY SOFT ROBOTS CONTROLLERS \n\t' +
          ('FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
           else ' ' * 10 + 'FOR GOAL REACHING TASK') +
          '\n\t' + ' ' * 7 + '(SINGLE MAP-ELITES VARIANT)')
    print('# ' + '=' * bar_num + ' #\n')
    print('# ' + '=' * bar_num + ' #')
    print('\t' + ' ' * 7 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '=' * bar_num + ' #')
    print_dict(config, list_on_levels=False)
    print('# ' + '=' * bar_num + ' #')


def print_header_double_map(config, bar_num=55):
    print('# ' + '=' * bar_num + ' #')
    print('\t\t EVOLUTIONARY SIMULATION\n\t' +
          ' OF TENSEGRITY SOFT ROBOTS CONTROLLERS \n\t' +
          ('FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
           else ' ' * 10 + 'FOR GOAL REACHING TASK') +
          '\n\t' + ' ' * 7 + '(DOUBLE MAP-ELITES VARIANT)')
    print('# ' + '=' * bar_num + ' #\n')
    print('# ' + '=' * bar_num + ' #')
    print('\t' + ' ' * 7 + 'EXPERIMENT CONFIGURATION')
    print('# ' + '=' * bar_num + ' #')
    print_dict(config, list_on_levels=False)
    print('# ' + '=' * bar_num + ' #')


def store_checkpoint(checkpoint, filename):
    with open(filename, 'wb') as cp_file:
        pickle.dump(checkpoint, cp_file)


def record_info(logbook, stats, gen, pop, inv_ind):
    record = stats.compile(pop) if stats is not None else {}
    logbook.record(gen=gen, nevals=len(inv_ind), **record)


def record_population(num_sims, population, file, skips, pbar=None, verbose=False, coev=False):
    fitness_values = np.asarray(list(map(lambda i: i.fitness.values, population)))
    if coev:
        fitness_values = [1.0 / f for f in fitness_values]
    pop_stats = {
        'num_sims': num_sims,
        'avg_fitness': np.mean(fitness_values),
        'std_dev': np.std(fitness_values),
        'min': np.min(fitness_values),
        'max': np.max(fitness_values)
    }
    # store the current population values
    file.write(json.dumps({
        **pop_stats,
        'population': [individual.info(coev=coev) for individual in population]
    }))
    if verbose:
        if pbar is not None:
            pbar.set_postfix({
                'avg': pop_stats['avg_fitness'],
                'std': pop_stats['std_dev'],
                'min': pop_stats['min'],
                'max': pop_stats['max'],
                'skip': skips
            })
        else:
            print('num_sims: {} | Fitness -> avg: {} std: {} min: {} max: {}'.format(
                pop_stats['num_sims'],
                pop_stats['avg_fitness'],
                pop_stats['std_dev'],
                pop_stats['min'],
                pop_stats['max']
            ))


def evaluate_ind(toolbox, individuals, glob_history, local_history=None, coev=False, eval_all=False):
    """
    :param toolbox:
    :param individuals:
    :param glob_history: a dictionary that maps simulator input strings
        into corresponding computed fitness.
    :param local_history:
    :param coev:
    :param eval_all:
    :return:
    """
    # consider only new individuals (offsprings)
    invalid_ind = [ind for ind in individuals
                   if (not ind.fitness.valid or (coev and eval_all))]

    # select for evaluation only solutions
    # that have not been already evaluated
    skip = []
    to_evaluate = []
    for ind in invalid_ind:
        ind_string = ind.string_input()
        if not eval_all and ind_string in glob_history:
            # assign fitness previously computed for the same configuration
            ind.fitness.values = glob_history[ind_string]
            # store also record into a local history to support MAP creation functionality
            if local_history is not None:
                local_history[ind_string] = glob_history[ind_string]
            skip.append(ind)
        else:
            to_evaluate.append(ind)

    n_evaluations = 0
    if len(to_evaluate) > 0:
        fitnesses = toolbox.map(toolbox.evaluate, to_evaluate)
        n_evaluations = len(fitnesses)
        for ind, fit in zip(to_evaluate, fitnesses):
            ind.fitness.values = fit
            ind_string = ind.string_input()
            # update history records with latest fitness
            glob_history[ind_string] = fit
            # store also record into a local history to support MAP creation functionality
            if local_history is not None:
                local_history[ind_string] = fit

    if not coev:
        n_evaluations = len(individuals)
    return to_evaluate + skip, len(skip), n_evaluations


def plot_population_stats(pop, results_folder, seed, num_sim, normalize=False):
    """
    Plot the fitness/num_modules/stiffness distribution across given robot population
    :param pop: population of robot individuals
    :param results_folder: folder where to store the plots
    :param seed: simulation seed
    :param num_sim: simulation id
    :param normalize: whether to plot the plots in a normalized manner (default: False)
    :return:
    """
    # split robots interested properties into three lists
    fits, n_mods, stiffs = list(zip(*[(ind.get_fitness(), ind.num_modules, ind.stiffness)
                                      for ind in pop]))

    # plot the properties distribution of last generation
    configs = [
        {
            'data': fits,
            'title': 'Fitness',
            'h_range': (0, np.max(fits)),
            'out_file': os.path.join(results_folder, 'fitness',
                                     'fit_dist_sim_{}_{}.pdf'.format(seed, num_sim)),
            'bins': max(len(fits)//4, 1),
            'discrete_hist': False,
            'norm': normalize
        },
        {
            'data': n_mods,
            'title': '# modules',
            'h_range': (2, 11),
            'out_file': os.path.join(results_folder, 'n_modules',
                                     'num_mods_dist_sim_{}_{}.pdf'.format(seed, num_sim)),
            'bins': N_MODULES,
            'discrete_hist': True,
            'norm': normalize
        },
        {
            'data': stiffs,
            'title': 'Stiffness',
            'out_file': os.path.join(results_folder, 'stiffness',
                                     'stiff_dist_sim_{}_{}.pdf'.format(seed, num_sim)),
            'bins': STIFF_TABLE,
            'discrete_hist': True,
            'norm': normalize
        }
    ]
    for conf in configs:
        plot_population_dist(conf)


def plot_population_dist(conf):
    """
    Plot the distribution of the values of given property across the population
    :param conf:
    :return:
    """
    font = {'family': 'Source Sans Pro', 'size': 12, 'weight': 'light'}
    matplotlib.rc('font', **font)
    matplotlib.rcParams["axes.titlepad"] = 15
    matplotlib.rcParams['figure.dpi'] = 300

    # create potential needed directory where to store the graph
    os.makedirs(os.path.dirname(conf['out_file']), exist_ok=True)

    num_colors = len(conf['bins']) if isinstance(conf['bins'], list) else conf['bins']
    colors = plt.cm.viridis(np.linspace(0, 1, num_colors))

    fig = plt.figure(figsize=(12, 5))
    ax = fig.gca()
    if conf['discrete_hist']:
        inds_vals = {str(sv): 0 for sv in conf['bins']}
        for ind_stiff in conf['data']:
            inds_vals[str(ind_stiff)] += 1
        x, y = list(zip(*[(k, v) for k, v in sorted(inds_vals.items(),
                                                    key=lambda r: float(r[0]))]))
        if conf['norm']:
            y = np.asarray(y)/np.sum(y)
        ax.bar(x, y, color=colors)
    else:
        _, bins, patches = ax.hist(conf['data'], bins=conf['bins'],
                                   density=conf['norm'], range=conf['h_range'])
        ax.set_xticks(bins)
        for i, (c, p) in enumerate(zip(bins, patches)):
            plt.setp(p, 'facecolor', colors[i])
    ax.set_title('{} distribution across last generation'.format(conf['title']), fontweight='normal')
    ax.set_xlabel(conf['title'])
    ax.set_ylabel('# Individuals')
    if conf['norm']:
        ax.set_ylim(0, 1.1)
    plt.savefig(conf['out_file'], bbox_inches='tight')
    plt.close()


def parse_robot_string(rb_string):
    robot = []
    for module_str in rb_string.split('--')[:-1]:
        params = module_str.split('-')
        module = {
            'order': int(params[0].strip()),
            'connectedModules': int(params[1].strip()),
            # the fields parsed from params[2:6] are not recoverable from the source
            'rot': float(params[6].strip()),
            'stiff': float(params[7].strip())
        }
        robot.append(module)
    return robot


# ================================== #
#      SIM HISTORY MANAGEMENT        #
# ================================== #

def load_history(history_file):
    """
    Load simulation history from provided file.
    In case no file is given, returns an empty history
    :param history_file:
    :return:
    """
    history = {}
    if history_file is not None and os.path.exists(history_file):
        with open(history_file) as history_in:
            # skip header
            history_in.readline()
            for line in history_in:
                robot_string, fitness = line.strip().split(',')
                history[robot_string.strip()] = (float(fitness),)
    return history


def store_history(history, history_file):
    """
    Store simulation history in provided file
    :param history:
    :param history_file:
    :return:
    """
    with open(history_file, 'w') as out_file:
        out_file.write('rob_string,fitness\n')
        for rob_string, fit in history.items():
            out_file.write('{},{}\n'.format(rob_string.strip(), fit[0]))


# NOTE: these functions below are the SQLite-backed counterparts of the two above

def load_history_db(history_db):
    """
    Load simulation history from provided db.
    In case no db is given, returns an empty history
    :param history_db:
    :return:
    """
    history = {}
    if history_db is not None:
        conn = sqlite3.connect(history_db)
        cursor = conn.cursor()
        for robot_string, fitness in cursor.execute('SELECT * FROM history'):
            history[robot_string] = (float(fitness),)
        cursor.close()
        conn.close()
    return history


def store_history_db(history, history_db):
    """
    Store simulation history in provided db
    :param history:
    :param history_db:
    :return:
    """
    def history_gen():
        for record in history.items():
            # flatten the (fitness,) tuple stored as dictionary value
            yield record[0], record[1][0]

    to_init = not os.path.exists(history_db)
    conn = sqlite3.connect(history_db)
    cursor = conn.cursor()
    # create the table on first use
    if to_init:
        cursor.execute('''CREATE TABLE history(robot_string VARCHAR PRIMARY KEY,
                                               fitness REAL NOT NULL)''')
    cursor.executemany('''REPLACE INTO history(robot_string, fitness) VALUES (?, ?)''',
                       history_gen())
    conn.commit()
    cursor.close()
    conn.close()
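

# --- Illustrative usage (not part of the original module) ---------------- #
# A minimal, self-contained sketch of the CSV history round-trip implemented
# by store_history()/load_history() above; the file name and robot strings
# below are made up for the example.
if __name__ == '__main__':
    import tempfile

    demo_file = os.path.join(tempfile.mkdtemp(), 'history.csv')
    demo_history = {
        '0 - 2 - 30.0 - 7.7 --': (0.42,),
        '1 - 3 - 45.0 - 5.1 --': (1.37,),
    }
    store_history(demo_history, demo_file)
    # fitness values survive the round-trip as 1-element tuples
    assert load_history(demo_file) == demo_history
    print('history round-trip OK')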
[ "str(value) else: value = str(value) self.modifier = Text(value, \"rimouski sb.ttf\", white, 60) self.modifierBack", "About import About from Text import * from color import * class Card(pygame.sprite.Sprite):", "getCard(self) self.image.set_alpha() def addModifier(self, value): \"\"\"Add bonus or malus to the card and", "allCards[self.number] self.image = None #self.verso = carteVerso self.About = About(self.name, self) # We", "values from About import About from Text import * from color import *", "self.values.append(self.top) self.values.append(self.right) self.values.append(self.bottom) self.values.append(self.left) self.parseToInt() # Which element self.elementName = values[number][4] # Offensive", "self.values = [] self.values.append(self.top) self.values.append(self.right) self.values.append(self.bottom) self.values.append(self.left) self.parseToInt() # Which element self.elementName =", "= Text(value, \"rimouski sb.ttf\", white, 60) self.modifierBack = Text(value, \"rimouski sb.ttf\", black, 60)", "Unused for now self.type = values[number][5] self.modifierValue = 0 self.inHand = 1 getCard(self)", "# coding: utf-8 import pygame import os from functions import * from color", "to the card and draw the bonus on the card\"\"\" self.modifierValue = value", "= 10 else: self.values[i] = int(self.values[i]) def __repr__(self): return \"<Card at %s >\"", "the game\"\"\" def __init__(self, number, owner): super(pygame.sprite.Sprite).__init__(Card) self.owner = owner self.number = number", "self.values.append(self.left) self.parseToInt() # Which element self.elementName = values[number][4] # Offensive or defensive. Unused", "self.modifier.rect) for i in range(0, 4): self.values[i] += self.modifierValue def addCursor(self): \"\"\"Add a", "self.values.append(self.bottom) self.values.append(self.left) self.parseToInt() # Which element self.elementName = values[number][4] # Offensive or defensive.", "according to listeCartes.py self.top = values[number][0] self.right = values[number][1] self.bottom = values[number][2] self.left", "self.borderRect = loadImage(\"images/border.png\") def parseToInt(self): for i in range(0, 4): if (self.values[i] ==", "About(self.name, self) # We put the numbers of the card according to listeCartes.py", "Text(value, \"rimouski sb.ttf\", black, 60) #self.modifier.rect.topleft = self.rect.topleft self.modifier.rect.move_ip(35, 15) self.modifierBack.rect.move_ip(38, 18) self.image.blit(self.modifierBack.surface,", "self.modifierBack = Text(value, \"rimouski sb.ttf\", black, 60) #self.modifier.rect.topleft = self.rect.topleft self.modifier.rect.move_ip(35, 15) self.modifierBack.rect.move_ip(38,", "listeCartes.py self.top = values[number][0] self.right = values[number][1] self.bottom = values[number][2] self.left = values[number][3]", "= str(value) self.modifier = Text(value, \"rimouski sb.ttf\", white, 60) self.modifierBack = Text(value, \"rimouski", "pygame.locals import * from listOfCards import allCards, values from About import About from", "import * from color import * class Card(pygame.sprite.Sprite): \"\"\"Manages the cards in the", "* class Card(pygame.sprite.Sprite): \"\"\"Manages the cards in the game\"\"\" def __init__(self, number, owner):", "to the focused card\"\"\" self.border, self.borderRect = loadImage(\"images/border.png\") def parseToInt(self): for i in", "parseToInt(self): for i in range(0, 4): if (self.values[i] == 'A'): self.values[i] = 10", "if (self.values[i] == 'A'): self.values[i] = 10 else: self.values[i] = int(self.values[i]) def 
__repr__(self):", "= values[number][4] # Offensive or defensive. Unused for now self.type = values[number][5] self.modifierValue", "bonus on the card\"\"\" self.modifierValue = value if value > 0: value =", "self.modifier = Text(value, \"rimouski sb.ttf\", white, 60) self.modifierBack = Text(value, \"rimouski sb.ttf\", black,", "malus to the card and draw the bonus on the card\"\"\" self.modifierValue =", "self.image.blit(self.modifierBack.surface, self.modifierBack.rect) self.image.blit(self.modifier.surface, self.modifier.rect) for i in range(0, 4): self.values[i] += self.modifierValue def", "self) # We put the numbers of the card according to listeCartes.py self.top", "def __init__(self, number, owner): super(pygame.sprite.Sprite).__init__(Card) self.owner = owner self.number = number self.name =", "coding: utf-8 import pygame import os from functions import * from color import", "and draw the bonus on the card\"\"\" self.modifierValue = value if value >", "range(0, 4): self.values[i] += self.modifierValue def addCursor(self): \"\"\"Add a colored border to the", "loadImage(\"images/border.png\") def parseToInt(self): for i in range(0, 4): if (self.values[i] == 'A'): self.values[i]", "self.owner = owner self.number = number self.name = allCards[self.number] self.image = None #self.verso", "= allCards[self.number] self.image = None #self.verso = carteVerso self.About = About(self.name, self) #", "number, owner): super(pygame.sprite.Sprite).__init__(Card) self.owner = owner self.number = number self.name = allCards[self.number] self.image", "We put the numbers of the card according to listeCartes.py self.top = values[number][0]", "'A'): self.values[i] = 10 else: self.values[i] = int(self.values[i]) def __repr__(self): return \"<Card at", "card according to listeCartes.py self.top = values[number][0] self.right = values[number][1] self.bottom = values[number][2]", "60) #self.modifier.rect.topleft = self.rect.topleft self.modifier.rect.move_ip(35, 15) self.modifierBack.rect.move_ip(38, 18) self.image.blit(self.modifierBack.surface, self.modifierBack.rect) self.image.blit(self.modifier.surface, self.modifier.rect) for", "= values[number][5] self.modifierValue = 0 self.inHand = 1 getCard(self) self.rect = self.image.get_rect() if", "or defensive. 
Unused for now self.type = values[number][5] self.modifierValue = 0 self.inHand =", "of the card according to listeCartes.py self.top = values[number][0] self.right = values[number][1] self.bottom", "import * class Card(pygame.sprite.Sprite): \"\"\"Manages the cards in the game\"\"\" def __init__(self, number,", "import * from pygame.locals import * from listOfCards import allCards, values from About", "= 1 getCard(self) self.rect = self.image.get_rect() if self.elementName != None: self.element, self.elementRect =", "self.inHand = 1 getCard(self) self.rect = self.image.get_rect() if self.elementName != None: self.element, self.elementRect", "a colored border to the focused card\"\"\" self.border, self.borderRect = loadImage(\"images/border.png\") def parseToInt(self):", "in range(0, 4): self.values[i] += self.modifierValue def addCursor(self): \"\"\"Add a colored border to", "self.modifierBack.rect.move_ip(38, 18) self.image.blit(self.modifierBack.surface, self.modifierBack.rect) self.image.blit(self.modifier.surface, self.modifier.rect) for i in range(0, 4): self.values[i] +=", "values[number][3] self.values = [] self.values.append(self.top) self.values.append(self.right) self.values.append(self.bottom) self.values.append(self.left) self.parseToInt() # Which element self.elementName", "for now self.type = values[number][5] self.modifierValue = 0 self.inHand = 1 getCard(self) self.rect", "\"+\" + str(value) else: value = str(value) self.modifier = Text(value, \"rimouski sb.ttf\", white,", "self.modifierBack.rect) self.image.blit(self.modifier.surface, self.modifier.rect) for i in range(0, 4): self.values[i] += self.modifierValue def addCursor(self):", "self.rect = self.image.get_rect() if self.elementName != None: self.element, self.elementRect = loadElement(self.elementName) self.elementRect.topright =", "listOfCards import allCards, values from About import About from Text import * from", "= loadImage(\"images/border.png\") def parseToInt(self): for i in range(0, 4): if (self.values[i] == 'A'):", "value = \"+\" + str(value) else: value = str(value) self.modifier = Text(value, \"rimouski", "in range(0, 4): if (self.values[i] == 'A'): self.values[i] = 10 else: self.values[i] =", "18) self.image.blit(self.modifierBack.surface, self.modifierBack.rect) self.image.blit(self.modifier.surface, self.modifier.rect) for i in range(0, 4): self.values[i] += self.modifierValue", "4): if (self.values[i] == 'A'): self.values[i] = 10 else: self.values[i] = int(self.values[i]) def", "element self.elementName = values[number][4] # Offensive or defensive. 
Unused for now self.type =", "self.modifierValue def addCursor(self): \"\"\"Add a colored border to the focused card\"\"\" self.border, self.borderRect", "1 getCard(self) self.rect = self.image.get_rect() if self.elementName != None: self.element, self.elementRect = loadElement(self.elementName)", "+= self.modifierValue def addCursor(self): \"\"\"Add a colored border to the focused card\"\"\" self.border,", "self.type = values[number][5] self.modifierValue = 0 self.inHand = 1 getCard(self) self.rect = self.image.get_rect()", "addModifier(self, value): \"\"\"Add bonus or malus to the card and draw the bonus", "* from color import * from pygame.locals import * from listOfCards import allCards,", "self.About = About(self.name, self) # We put the numbers of the card according", "self.bottom = values[number][2] self.left = values[number][3] self.values = [] self.values.append(self.top) self.values.append(self.right) self.values.append(self.bottom) self.values.append(self.left)", "= \"+\" + str(value) else: value = str(value) self.modifier = Text(value, \"rimouski sb.ttf\",", "import * from listOfCards import allCards, values from About import About from Text", "# Offensive or defensive. Unused for now self.type = values[number][5] self.modifierValue = 0", "owner self.number = number self.name = allCards[self.number] self.image = None #self.verso = carteVerso", "import allCards, values from About import About from Text import * from color", "15) self.modifierBack.rect.move_ip(38, 18) self.image.blit(self.modifierBack.surface, self.modifierBack.rect) self.image.blit(self.modifier.surface, self.modifier.rect) for i in range(0, 4): self.values[i]", "= Text(value, \"rimouski sb.ttf\", black, 60) #self.modifier.rect.topleft = self.rect.topleft self.modifier.rect.move_ip(35, 15) self.modifierBack.rect.move_ip(38, 18)", "for i in range(0, 4): self.values[i] += self.modifierValue def addCursor(self): \"\"\"Add a colored", "None #self.verso = carteVerso self.About = About(self.name, self) # We put the numbers", "cards in the game\"\"\" def __init__(self, number, owner): super(pygame.sprite.Sprite).__init__(Card) self.owner = owner self.number", "numbers of the card according to listeCartes.py self.top = values[number][0] self.right = values[number][1]", "self.top = values[number][0] self.right = values[number][1] self.bottom = values[number][2] self.left = values[number][3] self.values", "= value if value > 0: value = \"+\" + str(value) else: value", "self.border, self.borderRect = loadImage(\"images/border.png\") def parseToInt(self): for i in range(0, 4): if (self.values[i]", "self.elementRect = loadElement(self.elementName) self.elementRect.topright = self.rect.topright self.elementRect.move_ip(-2, 2) self.image.blit(self.element, self.elementRect) def changeOwner(self): getCard(self)", "Card(pygame.sprite.Sprite): \"\"\"Manages the cards in the game\"\"\" def __init__(self, number, owner): super(pygame.sprite.Sprite).__init__(Card) self.owner", "> 0: value = \"+\" + str(value) else: value = str(value) self.modifier =", "from pygame.locals import * from listOfCards import allCards, values from About import About", "card\"\"\" self.modifierValue = value if value > 0: value = \"+\" + str(value)", "put the numbers of the card according to listeCartes.py self.top = values[number][0] self.right", "Text import * from color import * class Card(pygame.sprite.Sprite): \"\"\"Manages the cards in", "self.image.blit(self.element, self.elementRect) def changeOwner(self): getCard(self) self.image.set_alpha() def 
addModifier(self, value): \"\"\"Add bonus or malus", "sb.ttf\", white, 60) self.modifierBack = Text(value, \"rimouski sb.ttf\", black, 60) #self.modifier.rect.topleft = self.rect.topleft", "= About(self.name, self) # We put the numbers of the card according to", "card\"\"\" self.border, self.borderRect = loadImage(\"images/border.png\") def parseToInt(self): for i in range(0, 4): if", "i in range(0, 4): if (self.values[i] == 'A'): self.values[i] = 10 else: self.values[i]", "#self.verso = carteVerso self.About = About(self.name, self) # We put the numbers of", "self.parseToInt() # Which element self.elementName = values[number][4] # Offensive or defensive. Unused for", "= values[number][3] self.values = [] self.values.append(self.top) self.values.append(self.right) self.values.append(self.bottom) self.values.append(self.left) self.parseToInt() # Which element", "self.right = values[number][1] self.bottom = values[number][2] self.left = values[number][3] self.values = [] self.values.append(self.top)", "\"rimouski sb.ttf\", black, 60) #self.modifier.rect.topleft = self.rect.topleft self.modifier.rect.move_ip(35, 15) self.modifierBack.rect.move_ip(38, 18) self.image.blit(self.modifierBack.surface, self.modifierBack.rect)", "= carteVerso self.About = About(self.name, self) # We put the numbers of the", "values[number][4] # Offensive or defensive. Unused for now self.type = values[number][5] self.modifierValue =", "0: value = \"+\" + str(value) else: value = str(value) self.modifier = Text(value,", "white, 60) self.modifierBack = Text(value, \"rimouski sb.ttf\", black, 60) #self.modifier.rect.topleft = self.rect.topleft self.modifier.rect.move_ip(35,", "= values[number][2] self.left = values[number][3] self.values = [] self.values.append(self.top) self.values.append(self.right) self.values.append(self.bottom) self.values.append(self.left) self.parseToInt()", "def addModifier(self, value): \"\"\"Add bonus or malus to the card and draw the", "self.elementName = values[number][4] # Offensive or defensive. 
Unused for now self.type = values[number][5]", "= loadElement(self.elementName) self.elementRect.topright = self.rect.topright self.elementRect.move_ip(-2, 2) self.image.blit(self.element, self.elementRect) def changeOwner(self): getCard(self) self.image.set_alpha()", "value): \"\"\"Add bonus or malus to the card and draw the bonus on", "+ str(value) else: value = str(value) self.modifier = Text(value, \"rimouski sb.ttf\", white, 60)", "value = str(value) self.modifier = Text(value, \"rimouski sb.ttf\", white, 60) self.modifierBack = Text(value,", "import pygame import os from functions import * from color import * from", "About from Text import * from color import * class Card(pygame.sprite.Sprite): \"\"\"Manages the", "self.values.append(self.right) self.values.append(self.bottom) self.values.append(self.left) self.parseToInt() # Which element self.elementName = values[number][4] # Offensive or", "values[number][5] self.modifierValue = 0 self.inHand = 1 getCard(self) self.rect = self.image.get_rect() if self.elementName", "os from functions import * from color import * from pygame.locals import *", "self.values[i] = 10 else: self.values[i] = int(self.values[i]) def __repr__(self): return \"<Card at %s", "4): self.values[i] += self.modifierValue def addCursor(self): \"\"\"Add a colored border to the focused", "value if value > 0: value = \"+\" + str(value) else: value =", "addCursor(self): \"\"\"Add a colored border to the focused card\"\"\" self.border, self.borderRect = loadImage(\"images/border.png\")", "= owner self.number = number self.name = allCards[self.number] self.image = None #self.verso =", "from functions import * from color import * from pygame.locals import * from", "from color import * from pygame.locals import * from listOfCards import allCards, values", "class Card(pygame.sprite.Sprite): \"\"\"Manages the cards in the game\"\"\" def __init__(self, number, owner): super(pygame.sprite.Sprite).__init__(Card)", "Which element self.elementName = values[number][4] # Offensive or defensive. Unused for now self.type", "black, 60) #self.modifier.rect.topleft = self.rect.topleft self.modifier.rect.move_ip(35, 15) self.modifierBack.rect.move_ip(38, 18) self.image.blit(self.modifierBack.surface, self.modifierBack.rect) self.image.blit(self.modifier.surface, self.modifier.rect)", "= self.rect.topleft self.modifier.rect.move_ip(35, 15) self.modifierBack.rect.move_ip(38, 18) self.image.blit(self.modifierBack.surface, self.modifierBack.rect) self.image.blit(self.modifier.surface, self.modifier.rect) for i in", "from About import About from Text import * from color import * class", "carteVerso self.About = About(self.name, self) # We put the numbers of the card", "# Which element self.elementName = values[number][4] # Offensive or defensive. 
Unused for now", "None: self.element, self.elementRect = loadElement(self.elementName) self.elementRect.topright = self.rect.topright self.elementRect.move_ip(-2, 2) self.image.blit(self.element, self.elementRect) def", "colored border to the focused card\"\"\" self.border, self.borderRect = loadImage(\"images/border.png\") def parseToInt(self): for", "for i in range(0, 4): if (self.values[i] == 'A'): self.values[i] = 10 else:", "self.modifier.rect.move_ip(35, 15) self.modifierBack.rect.move_ip(38, 18) self.image.blit(self.modifierBack.surface, self.modifierBack.rect) self.image.blit(self.modifier.surface, self.modifier.rect) for i in range(0, 4):", "self.image.get_rect() if self.elementName != None: self.element, self.elementRect = loadElement(self.elementName) self.elementRect.topright = self.rect.topright self.elementRect.move_ip(-2,", "draw the bonus on the card\"\"\" self.modifierValue = value if value > 0:", "self.modifierValue = value if value > 0: value = \"+\" + str(value) else:", "getCard(self) self.rect = self.image.get_rect() if self.elementName != None: self.element, self.elementRect = loadElement(self.elementName) self.elementRect.topright", "def parseToInt(self): for i in range(0, 4): if (self.values[i] == 'A'): self.values[i] =", "Text(value, \"rimouski sb.ttf\", white, 60) self.modifierBack = Text(value, \"rimouski sb.ttf\", black, 60) #self.modifier.rect.topleft", "\"rimouski sb.ttf\", white, 60) self.modifierBack = Text(value, \"rimouski sb.ttf\", black, 60) #self.modifier.rect.topleft =", "value > 0: value = \"+\" + str(value) else: value = str(value) self.modifier", "= number self.name = allCards[self.number] self.image = None #self.verso = carteVerso self.About =", "== 'A'): self.values[i] = 10 else: self.values[i] = int(self.values[i]) def __repr__(self): return \"<Card", "import About from Text import * from color import * class Card(pygame.sprite.Sprite): \"\"\"Manages", "* from color import * class Card(pygame.sprite.Sprite): \"\"\"Manages the cards in the game\"\"\"", "values[number][0] self.right = values[number][1] self.bottom = values[number][2] self.left = values[number][3] self.values = []", "* from listOfCards import allCards, values from About import About from Text import", "the card according to listeCartes.py self.top = values[number][0] self.right = values[number][1] self.bottom =", "[] self.values.append(self.top) self.values.append(self.right) self.values.append(self.bottom) self.values.append(self.left) self.parseToInt() # Which element self.elementName = values[number][4] #", "#self.modifier.rect.topleft = self.rect.topleft self.modifier.rect.move_ip(35, 15) self.modifierBack.rect.move_ip(38, 18) self.image.blit(self.modifierBack.surface, self.modifierBack.rect) self.image.blit(self.modifier.surface, self.modifier.rect) for i", "the bonus on the card\"\"\" self.modifierValue = value if value > 0: value", "2) self.image.blit(self.element, self.elementRect) def changeOwner(self): getCard(self) self.image.set_alpha() def addModifier(self, value): \"\"\"Add bonus or", "(self.values[i] == 'A'): self.values[i] = 10 else: self.values[i] = int(self.values[i]) def __repr__(self): return", "self.image.set_alpha() def addModifier(self, value): \"\"\"Add bonus or malus to the card and draw", "the numbers of the card according to listeCartes.py self.top = values[number][0] self.right =", "to listeCartes.py self.top = values[number][0] self.right = values[number][1] self.bottom = values[number][2] self.left =", "= self.rect.topright self.elementRect.move_ip(-2, 
# -*- coding: utf-8 -*-
import pygame
import os

from functions import *
from color import *
from pygame.locals import *
from listOfCards import allCards, values
from About import About
from Text import *


class Card(pygame.sprite.Sprite):
    """Manages the cards in the game"""

    def __init__(self, number, owner):
        pygame.sprite.Sprite.__init__(self)
        self.owner = owner
        self.number = number
        self.name = allCards[self.number]
        self.image = None
        #self.verso = carteVerso
        self.About = About(self.name, self)
        # We put the numbers of the card according to listeCartes.py
        self.top = values[number][0]
        self.right = values[number][1]
        self.bottom = values[number][2]
        self.left = values[number][3]
        self.values = []
        self.values.append(self.top)
        self.values.append(self.right)
        self.values.append(self.bottom)
        self.values.append(self.left)
        self.parseToInt()
        # Which element
        self.elementName = values[number][4]
        # Offensive or defensive. Unused for now
        self.type = values[number][5]
        self.modifierValue = 0
        self.inHand = 1
        getCard(self)
        self.rect = self.image.get_rect()
        if self.elementName is not None:
            self.element, self.elementRect = loadElement(self.elementName)
            self.elementRect.topright = self.rect.topright
            self.elementRect.move_ip(-2, 2)
            self.image.blit(self.element, self.elementRect)

    def changeOwner(self):
        # Reload the card surface for the new owner
        getCard(self)
        self.image.set_alpha()

    def addModifier(self, value):
        """Add bonus or malus to the card and draw the bonus on the card"""
        self.modifierValue = value
        if value > 0:
            value = "+" + str(value)
        else:
            value = str(value)
        self.modifier = Text(value, "rimouski sb.ttf", white, 60)
        self.modifierBack = Text(value, "rimouski sb.ttf", black, 60)
        #self.modifier.rect.topleft = self.rect.topleft
        self.modifier.rect.move_ip(35, 15)
        self.modifierBack.rect.move_ip(38, 18)
        self.image.blit(self.modifierBack.surface, self.modifierBack.rect)
        self.image.blit(self.modifier.surface, self.modifier.rect)
        for i in range(0, 4):
            self.values[i] += self.modifierValue

    def addCursor(self):
        """Add a colored border to the focused card"""
        self.border, self.borderRect = loadImage("images/border.png")

    def parseToInt(self):
        """Convert the four side values to ints, with 'A' meaning 10"""
        for i in range(0, 4):
            if self.values[i] == 'A':
                self.values[i] = 10
            else:
                self.values[i] = int(self.values[i])

    def __repr__(self):
        return "<Card at %s >" % (self.rect)
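# Illustrative sketch (not from the original file): the two pure pieces of
# Card's number handling above, extracted so they can run standalone. The
# helper names parse_to_int/apply_modifier are hypothetical; Card itself
# needs pygame plus the asset helpers (getCard, loadElement, loadImage).

def parse_to_int(sides):
    # 'A' stands for the top value 10, as in Card.parseToInt
    return [10 if side == 'A' else int(side) for side in sides]

def apply_modifier(sides, modifier):
    # An elemental bonus/malus shifts all four sides, as in Card.addModifier
    return [side + modifier for side in sides]

assert parse_to_int(['A', '2', '7', '1']) == [10, 2, 7, 1]
assert apply_modifier([10, 2, 7, 1], -1) == [9, 1, 6, 0]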
[ "assert_is_not_none(props.get('descriptor')) class TestPagerDutyOutput(object): \"\"\"Test class for PagerDutyOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class", "return get_alert() @patch('logging.Logger.info') @patch('urllib2.urlopen') def test_dispatch_existing_container(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch success, existing container\"\"\"", "time # it is called. This allows testing issues down the chain somewhere", "\"\"\"PhantomOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed to send", "cached method\"\"\" self.__dispatcher._get_default_properties = self.__backup_method @patch('logging.Logger.info') @patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_success(self, url_mock, log_info_mock):", "License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "None def test_aws_format_output_config(self): \"\"\"AWSOutput format output config\"\"\" props = { 'descriptor': OutputProperty( 'short_descriptor',", "for setting up PhantomOutput dispatch\"\"\" remove_temp_secrets() output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds = {'url': url,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the", "alert = get_random_alert(30, rule_name) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn',", "cls.__descriptor = 'unit_test_phantom' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown", "test_json_to_slack_mrkdwn_list(self): \"\"\"JSON to Slack mrkdwn - simple list\"\"\" simple_list = ['test_value_01', 'test_value_02'] result", "loaded_message['attachments'][0]['pretext'], default_rule_description) def test_json_to_slack_mrkdwn_str(self): \"\"\"JSON to Slack mrkdwn - simple str\"\"\" simple_str =", "def test_dispatch_container_failure(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch failure (setup container)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value", "to %s', self.__service) @patch('logging.Logger.error') @mock_s3 @mock_kms def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"PagerDutyOutput dispatch bad descriptor\"\"\"", "dict\"\"\" nested_dict = OrderedDict([ ('root_key_01', 'root_value_01'), ('root_02', 'root_value_02'), ('root_nested_01', OrderedDict([ ('nested_key_01', 100), ('nested_key_02',", "# tests assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'}) assert_equal( loaded_message['text'], '*StreamAlert Rule Triggered: test_rule_multi-part*') assert_equal(len(loaded_message['attachments']),", "class TestAWSOutput(object): \"\"\"Test class for AWSOutput Base\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class", "teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" cls.__dispatcher = None def test_get_default_properties(self): \"\"\"Get", "alert=alert) self._teardown_dispatch() log_error_mock.assert_called_with('Failed to send alert to %s', self.__service) @patch('logging.Logger.error') @mock_s3 @mock_kms def", "import outputs from stream_alert.alert_processor.output_base import OutputProperty from stream_alert_cli.helpers import create_lambda_function, 
put_mock_creds from tests.unit.stream_alert_alert_processor", "License for the specific language governing permissions and limitations under the License. \"\"\"", "REGION, FUNCTION_NAME, CONFIG) log_mock.assert_called_with( 'designated output service [%s] does not exist', bad_service) def", "# tests assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'}) assert_equal( loaded_message['text'], '*StreamAlert Rule Triggered: test_rule_single*') assert_equal(len(loaded_message['attachments']),", "\"\"\"PhantomOutput dispatch success, new container\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.side_effect =", "200 url_mock.return_value.read.side_effect = ['{\"count\": 0, \"data\": []}', '{\"id\": 1948}'] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully", "log_mock): \"\"\"LambdaOutput dispatch\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent alert to", "= OrderedDict([('test_key_01', 'test_value_01'), ('test_key_02', 'test_value_02')]) result = self.__dispatcher._json_map_to_text(simple_dict, '\\t', 0) assert_equal(len(result), 2) assert_equal(result[1],", "The user defined properties should at a minimum contain a descriptor assert_is_not_none(props.get('descriptor')) class", "= {'url': url, 'ph_auth_token': '<PASSWORD>'} put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return get_alert() @patch('logging.Logger.info')", "result = self.__dispatcher._json_to_slack_mrkdwn(simple_list, 0) assert_equal(len(result), 2) assert_equal(result[0], '*[1]* test_value_01') assert_equal(result[1], '*[2]* test_value_02') def", "def test_dispatch(self, log_mock): \"\"\"LambdaOutput dispatch\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent", "formatted_config = self.__dispatcher.format_output_config(CONFIG, props) assert_equal(len(formatted_config), 2) assert_is_not_none(formatted_config.get('descriptor_value')) assert_is_not_none(formatted_config.get('unit_test_bucket')) def test_dispatch(self): \"\"\"AWSOutput dispatch pass\"\"\"", "result = self.__dispatcher._json_map_to_text(simple_dict, '\\t', 0) assert_equal(len(result), 2) assert_equal(result[1], '*test_key_02:* test_value_02') def _setup_dispatch(self): \"\"\"Helper", "dispatcher - nonexistent\"\"\" nonexistent_service = 'aws-s4' dispatcher = outputs.get_output_dispatcher(nonexistent_service, REGION, FUNCTION_NAME, CONFIG) assert_is_none(dispatcher)", "test_dispatch_failure(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch failure (artifact)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.read.side_effect = ['',", "@mock_kms def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"SlackOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name',", "error occurred while decoding ' 'Phantom container query response to JSON: %s', ValueError(", "@classmethod def teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" outputs.AWSOutput.__abstractmethods__ = cls.__abstractmethods_cache cls.__dispatcher", "the License is distributed on an \"AS 
IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "'designated output service [%s] does not exist', bad_service) def test_user_defined_properties(): \"\"\"Get user defined", "import Counter, OrderedDict import json import boto3 from mock import call, patch from", "dispatcher - existing\"\"\" service = 'aws-s3' dispatcher = outputs.get_output_dispatcher( service, REGION, FUNCTION_NAME, CONFIG)", "send alert to %s', self.__service) @patch('logging.Logger.error') @mock_s3 @mock_kms def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"PagerDutyOutput dispatch", "REGION) return get_alert() @mock_lambda @patch('logging.Logger.info') def test_dispatch(self, log_mock): \"\"\"LambdaOutput dispatch\"\"\" alert = self._setup_dispatch()", "up LambdaOutput dispatch\"\"\" function_name = CONFIG[self.__service][alt_descriptor or self.__descriptor] create_lambda_function(function_name, REGION) return get_alert() @mock_lambda", "cls.__dispatcher = None def test_format_message_single(self): \"\"\"Format Single Message - Slack\"\"\" rule_name = 'test_rule_single'", "stream_alert_cli.helpers import create_lambda_function, put_mock_creds from tests.unit.stream_alert_alert_processor import CONFIG, FUNCTION_NAME, KMS_ALIAS, REGION from tests.unit.stream_alert_alert_processor.helpers", "def test_dispatch_container_query(self, request_mock): \"\"\"PhantomOutput - Container Query URL\"\"\" alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name',", "Base\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" # pylint: disable=abstract-class-instantiated", "result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0) assert_equal(len(result), 10) assert_equal(result[2], '*root_nested_01:*') assert_equal(Counter(result[4])['\\t'], 1) assert_equal(result[-1], '\\t\\t\\t*[3]* 51919')", "@patch('logging.Logger.info') @patch('urllib2.urlopen') def test_dispatch_new_container(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch success, new container\"\"\" alert =", "_setup_dispatch(self): \"\"\"Helper for setting up SlackOutput dispatch\"\"\" remove_temp_secrets() output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds =", "governing permissions and limitations under the License. 
\"\"\" # pylint: disable=protected-access from collections", "alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_container_failure(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch failure", "by applicable law or agreed to in writing, software distributed under the License", "sent alert to %s', self.__service) @mock_lambda @patch('logging.Logger.info') def test_dispatch_with_qualifier(self, log_mock): \"\"\"LambdaOutput dispatch with", "def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service = 'phantom' cls.__descriptor =", "the class after all methods\"\"\" cls.__dispatcher = None def test_format_message_single(self): \"\"\"Format Single Message", "= cls.__abstractmethods_cache cls.__dispatcher = None def test_aws_format_output_config(self): \"\"\"AWSOutput format output config\"\"\" props =", "default_rule_description) def test_json_to_slack_mrkdwn_str(self): \"\"\"JSON to Slack mrkdwn - simple str\"\"\" simple_str = 'value", "None def test_locals(self): \"\"\"S3Output local variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__, 'S3Output') assert_equal(self.__dispatcher.__service__, self.__service) def _setup_dispatch(self): \"\"\"Helper", "error\"\"\" bad_service = 'bad-output' outputs.get_output_dispatcher(bad_service, REGION, FUNCTION_NAME, CONFIG) log_mock.assert_called_with( 'designated output service [%s]", "mock_s3, mock_kms, mock_lambda from nose.tools import ( assert_equal, assert_is_none, assert_is_not_none, assert_set_equal ) from", "text\"\"\" simple_list = ['test_value_01', 'test_value_02'] result = self.__dispatcher._json_list_to_text(simple_list, '\\t', 0) assert_equal(len(result), 2) assert_equal(result[0],", "contain a descriptor assert_is_not_none(props.get('descriptor')) class TestPagerDutyOutput(object): \"\"\"Test class for PagerDutyOutput\"\"\" @classmethod def setup_class(cls):", "for S3Output\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service =", "any methods\"\"\" cls.__service = 'aws-s3' cls.__descriptor = 'unit_test_bucket' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME,", "def test_json_to_slack_mrkdwn_str(self): \"\"\"JSON to Slack mrkdwn - simple str\"\"\" simple_str = 'value to", "alert to %s', self.__service) @patch('stream_alert.alert_processor.output_base.StreamOutputBase._request_helper') def test_dispatch_container_query(self, request_mock): \"\"\"PhantomOutput - Container Query URL\"\"\"", "'aws-lambda' cls.__descriptor = 'unit_test_lambda' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls):", "0) assert_equal(len(result), 1) assert_equal(result[0], simple_str) def test_json_to_slack_mrkdwn_dict(self): \"\"\"JSON to Slack mrkdwn - simple", "test_dispatch_failure(self, url_mock, log_error_mock): \"\"\"PagerDutyOutput dispatch failure\"\"\" alert = self._setup_dispatch() bad_message = '{\"error\": {\"message\":", "['', '{\"id\": 1902}'] # Use side_effect to change the getcode return value the", "= OrderedDict([ ('root_key_01', 'root_value_01'), ('root_02', 'root_value_02'), ('root_nested_01', OrderedDict([ ('nested_key_01', 100), ('nested_key_02', 200), ('nested_nested_01',", "class before any methods\"\"\" cls.__service = 'aws-lambda' cls.__descriptor = 'unit_test_lambda' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service,", "= None def test_format_message_single(self): 
\"\"\"Format Single Message - Slack\"\"\" rule_name = 'test_rule_single' alert", "'slack' cls.__descriptor = 'unit_test_channel' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls):", "alert=alert) response = str( call('An error occurred while decoding ' 'Phantom container query", "output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds = {'url': 'https://api.slack.com/web-hook-key'} put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return", "put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return get_alert() @patch('logging.Logger.info') @patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_success(self,", "@patch('logging.Logger.info') @mock_s3 def test_dispatch(self, log_mock): \"\"\"S3Output dispatch\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert)", "200 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') @mock_s3", "self.__dispatcher._get_default_properties() assert_equal(len(props), 1) assert_equal(props['url'], 'https://events.pagerduty.com/generic/2010-04-15/create_event.json') def _setup_dispatch(self): \"\"\"Helper for setting up PagerDutyOutput dispatch\"\"\"", "None def test_format_message_single(self): \"\"\"Format Single Message - Slack\"\"\" rule_name = 'test_rule_single' alert =", "]) ])) ])) ]) result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0) assert_equal(len(result), 10) assert_equal(result[2], '*root_nested_01:*') assert_equal(Counter(result[4])['\\t'],", "props) assert_equal(len(formatted_config), 2) assert_is_not_none(formatted_config.get('descriptor_value')) assert_is_not_none(formatted_config.get('unit_test_bucket')) def test_dispatch(self): \"\"\"AWSOutput dispatch pass\"\"\" passed = self.__dispatcher.dispatch()", "OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for", "@patch('urllib2.urlopen') def test_dispatch_failure(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch failure (artifact)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.read.side_effect", "\"\"\"AWSOutput dispatch pass\"\"\" passed = self.__dispatcher.dispatch() assert_is_none(passed) class TestS3Ouput(object): \"\"\"Test class for S3Output\"\"\"", "400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Failed to send alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen')", "mrkdwn - multi type nested\"\"\" nested_dict = OrderedDict([ ('root_key_01', 'root_value_01'), ('root_02', 'root_value_02'), ('root_nested_01',", "method with cached method\"\"\" self.__dispatcher._get_default_properties = self.__backup_method @patch('logging.Logger.info') @patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_success(self,", "= 'unit_test_phantom' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown the", "tests assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'}) assert_equal( loaded_message['text'], '*StreamAlert Rule Triggered: test_rule_multi-part*') assert_equal(len(loaded_message['attachments']), 2)", "may not use this file except in compliance with the License. You may", "rule description provided\\n' assert_equal( loaded_message['attachments'][0]['pretext'], default_rule_description) def test_json_to_slack_mrkdwn_str(self): \"\"\"JSON to Slack mrkdwn -", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "dispatch failure (artifact)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.read.side_effect = ['', '{\"id\": 1902}'] # Use", "('nested_key_01', 100), ('nested_key_02', 200), ('nested_nested_01', OrderedDict([ ('nested_nested_key_01', [ 6161, 1051, 51919 ]) ]))", "[\"err1\", \"err2\"]}}' url_mock.return_value.read.return_value = bad_message url_mock.return_value.getcode.return_value = 400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) self._teardown_dispatch() log_error_mock.assert_called_with('Failed", "PagerDutyOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service = 'slack'", "Description:*\\nNo rule description provided\\n' assert_equal( loaded_message['attachments'][0]['pretext'], default_rule_description) def test_json_to_slack_mrkdwn_str(self): \"\"\"JSON to Slack mrkdwn", "class after all methods\"\"\" cls.dispatcher = None def test_locals(self): \"\"\"LambdaOutput local variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__,", "def test_dispatch(self): \"\"\"AWSOutput dispatch pass\"\"\" passed = self.__dispatcher.dispatch() assert_is_none(passed) class TestS3Ouput(object): \"\"\"Test class", "def test_dispatch_failure(self, url_mock, log_error_mock): \"\"\"PagerDutyOutput dispatch failure\"\"\" alert = self._setup_dispatch() bad_message = '{\"error\":", "[ 6161, 1051, 51919 ]) ])) ])) ]) result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0) assert_equal(len(result),", "assert_equal, assert_is_none, assert_is_not_none, assert_set_equal ) from stream_alert.alert_processor import outputs from stream_alert.alert_processor.output_base import OutputProperty", "- nonexistent\"\"\" nonexistent_service = 'aws-s4' dispatcher = 
outputs.get_output_dispatcher(nonexistent_service, REGION, FUNCTION_NAME, CONFIG) assert_is_none(dispatcher) @patch('logging.Logger.error')", "\"data\": []}', '{\"id\": 1948}'] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent alert to %s', self.__service)", "dispatch\"\"\" remove_temp_secrets() output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds = {'url': 'https://api.slack.com/web-hook-key'} put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION,", "test_format_message_mutliple(self): \"\"\"Format Multi-Message - Slack\"\"\" rule_name = 'test_rule_multi-part' alert = get_random_alert(30, rule_name) loaded_message", "to format' result = self.__dispatcher._json_to_slack_mrkdwn(simple_str, 0) assert_equal(len(result), 1) assert_equal(result[0], simple_str) def test_json_to_slack_mrkdwn_dict(self): \"\"\"JSON", "alert = get_random_alert(10, rule_name, True) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests default_rule_description =", "_setup_dispatch(self, url): \"\"\"Helper for setting up PhantomOutput dispatch\"\"\" remove_temp_secrets() output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds", "\"\"\"Setup the class before any methods\"\"\" cls.__service = 'aws-lambda' cls.__descriptor = 'unit_test_lambda' cls.__dispatcher", "= 'aws-s3' cls.__descriptor = 'unit_test_bucket' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def", "creds = {'url': 'https://api.slack.com/web-hook-key'} put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return get_alert() @patch('logging.Logger.info') @patch('urllib2.urlopen')", "log error\"\"\" bad_service = 'bad-output' outputs.get_output_dispatcher(bad_service, REGION, FUNCTION_NAME, CONFIG) log_mock.assert_called_with( 'designated output service", "= self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0) assert_equal(len(result), 10) assert_equal(result[2], '*root_nested_01:*') assert_equal(Counter(result[4])['\\t'], 1) assert_equal(result[-1], '\\t\\t\\t*[3]* 51919') def", ") from stream_alert.alert_processor import outputs from stream_alert.alert_processor.output_base import OutputProperty from stream_alert_cli.helpers import create_lambda_function,", "all methods\"\"\" outputs.AWSOutput.__abstractmethods__ = cls.__abstractmethods_cache cls.__dispatcher = None def test_aws_format_output_config(self): \"\"\"AWSOutput format output", "'test_value_01'), ('test_key_02', 'test_value_02')]) result = self.__dispatcher._json_map_to_text(simple_dict, '\\t', 0) assert_equal(len(result), 2) assert_equal(result[1], '*test_key_02:* test_value_02')", "loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'}) assert_equal( loaded_message['text'], '*StreamAlert", "teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" cls.dispatcher = None def test_locals(self): \"\"\"S3Output", "response) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_failure(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch failure (artifact)\"\"\" alert =", "test_format_message_default_rule_description(self): \"\"\"Format Message Default Rule Description - Slack\"\"\" rule_name = 'test_empty_rule_description' alert =", "\"\"\"JSON list to text\"\"\" simple_list 
= ['test_value_01', 'test_value_02'] result = self.__dispatcher._json_list_to_text(simple_list, '\\t', 0)", "send alert to %s', self.__service) @patch('stream_alert.alert_processor.output_base.StreamOutputBase._request_helper') def test_dispatch_container_query(self, request_mock): \"\"\"PhantomOutput - Container Query", "send alert to %s', self.__service) @patch('logging.Logger.error') @mock_s3 @mock_kms def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"SlackOutput dispatch", "get_alert, remove_temp_secrets ) def test_existing_get_output_dispatcher(): \"\"\"Get output dispatcher - existing\"\"\" service = 'aws-s3'", "CONFIG) cls.__dispatcher.__service__ = 'aws-s3' @classmethod def teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\"", "methods\"\"\" outputs.AWSOutput.__abstractmethods__ = cls.__abstractmethods_cache cls.__dispatcher = None def test_aws_format_output_config(self): \"\"\"AWSOutput format output config\"\"\"", "to change the getcode return value the second time # it is called.", "alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)", "self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.info') @patch('urllib2.urlopen') def test_dispatch_new_container(self,", "KMS_ALIAS) return get_alert() @patch('logging.Logger.info') @patch('urllib2.urlopen') def test_dispatch_existing_container(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch success, existing", "mrkdwn - nested dict\"\"\" nested_dict = OrderedDict([ ('root_key_01', 'root_value_01'), ('root_02', 'root_value_02'), ('root_nested_01', OrderedDict([", "'attachments'}) assert_equal( loaded_message['text'], '*StreamAlert Rule Triggered: test_rule_single*') assert_equal(len(loaded_message['attachments']), 1) def test_format_message_mutliple(self): \"\"\"Format Multi-Message", "creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return get_alert() @patch('logging.Logger.info') @patch('urllib2.urlopen') def test_dispatch_existing_container(self, url_mock, log_mock): \"\"\"PhantomOutput", "('test_key_02', 'test_value_02')]) result = self.__dispatcher._json_map_to_text(simple_dict, '\\t', 0) assert_equal(len(result), 2) assert_equal(result[1], '*test_key_02:* test_value_02') def", "to Slack mrkdwn - simple dict\"\"\" simple_dict = OrderedDict([('test_key_01', 'test_value_01'), ('test_key_02', 'test_value_02')]) result", "False)]) class TestSlackOutput(object): \"\"\"Test class for PagerDutyOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class", "provided\\n' assert_equal( loaded_message['attachments'][0]['pretext'], default_rule_description) def test_json_to_slack_mrkdwn_str(self): \"\"\"JSON to Slack mrkdwn - simple str\"\"\"", "= 'unit_test_pagerduty' cls.__backup_method = None cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def", "methods\"\"\" cls.__service = 'slack' cls.__descriptor = 'unit_test_channel' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG)", "tests assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'}) assert_equal( loaded_message['text'], '*StreamAlert Rule Triggered: test_rule_single*') 
assert_equal(len(loaded_message['attachments']), 1)", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "\"\"\"Helper for setting up S3Output dispatch\"\"\" bucket = CONFIG[self.__service][self.__descriptor] boto3.client('s3', region_name=REGION).create_bucket(Bucket=bucket) return get_alert()", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use", "('test_key_02', 'test_value_02')]) result = self.__dispatcher._json_to_slack_mrkdwn(simple_dict, 0) assert_equal(len(result), 2) assert_equal(result[1], '*test_key_02:* test_value_02') def test_json_to_slack_mrkdwn_nested_dict(self):", "' 'Phantom container query response to JSON: %s', ValueError( 'No JSON object could", "with qualifier\"\"\" alt_descriptor = '{}_qual'.format(self.__descriptor) alert = self._setup_dispatch(alt_descriptor) self.__dispatcher.dispatch(descriptor=alt_descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent", "test_dispatch_container_failure(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch failure (setup container)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value =", "before any methods\"\"\" cls.__service = 'phantom' cls.__descriptor = 'unit_test_phantom' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION,", "get_alert() @mock_lambda @patch('logging.Logger.info') def test_dispatch(self, log_mock): \"\"\"LambdaOutput dispatch\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name',", "0) assert_equal(len(result), 2) assert_equal(result[0], '*[1]* test_value_01') assert_equal(result[1], '*[2]* test_value_02') def test_json_map_to_text(self): \"\"\"JSON map", "outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\"", "to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_failure(self, url_mock, log_error_mock): \"\"\"SlackOutput dispatch", "'aws-s3' dispatcher = outputs.get_output_dispatcher( service, REGION, FUNCTION_NAME, CONFIG) assert_is_not_none(dispatcher) def test_nonexistent_get_output_dispatcher(): \"\"\"Get output", "\"\"\"Test class for LambdaOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\"", "'No JSON object could be decoded',))) assert_equal(str(log_mock.call_args_list[0]), response) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_failure(self, url_mock,", "teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" outputs.AWSOutput.__abstractmethods__ = cls.__abstractmethods_cache cls.__dispatcher = None", "test_locals(self): \"\"\"LambdaOutput local variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__, 'LambdaOutput') assert_equal(self.__dispatcher.__service__, self.__service) def _setup_dispatch(self, alt_descriptor=''): \"\"\"Helper for", "@patch('urllib2.urlopen') def test_dispatch_new_container(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch success, new container\"\"\" alert = self._setup_dispatch('phantom.foo.bar')", "import call, patch from moto import mock_s3, mock_kms, mock_lambda from nose.tools import (", "under the License. 
\"\"\" # pylint: disable=protected-access from collections import Counter, OrderedDict import", "def _setup_dispatch(self, url): \"\"\"Helper for setting up PhantomOutput dispatch\"\"\" remove_temp_secrets() output_name = self.__dispatcher.output_cred_name(self.__descriptor)", "= 400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) self._teardown_dispatch() log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)", "FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" cls.dispatcher =", "@patch('logging.Logger.error') def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"PhantomOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name',", "assert_equal(Counter(result[4])['\\t'], 1) assert_equal(result[-1], '\\t\\t\\t*[3]* 51919') def test_json_list_to_text(self): \"\"\"JSON list to text\"\"\" simple_list =", "@mock_kms def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"PagerDutyOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name',", "self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_container_error(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch decode error (setup container)\"\"\"", "OutputProperty from stream_alert_cli.helpers import create_lambda_function, put_mock_creds from tests.unit.stream_alert_alert_processor import CONFIG, FUNCTION_NAME, KMS_ALIAS, REGION", "log_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.info') @patch('urllib2.urlopen') def test_dispatch_new_container(self, url_mock, log_mock): \"\"\"PhantomOutput", "@patch('logging.Logger.error') @mock_s3 @mock_kms def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"PagerDutyOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch()", "self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed to send alert to %s', self.__service) @patch('stream_alert.alert_processor.output_base.StreamOutputBase._request_helper') def test_dispatch_container_query(self,", "not use this file except in compliance with the License. 
You may obtain", "'unit_test_pagerduty' cls.__backup_method = None cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls):", "KMS_ALIAS, REGION from tests.unit.stream_alert_alert_processor.helpers import ( get_random_alert, get_alert, remove_temp_secrets ) def test_existing_get_output_dispatcher(): \"\"\"Get", "\"\"\"Teardown the class after all methods\"\"\" cls.dispatcher = None def test_locals(self): \"\"\"LambdaOutput local", "'Phantom container query response to JSON: %s', ValueError( 'No JSON object could be", "dispatcher - log error\"\"\" bad_service = 'bad-output' outputs.get_output_dispatcher(bad_service, REGION, FUNCTION_NAME, CONFIG) log_mock.assert_called_with( 'designated", "setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service = 'phantom' cls.__descriptor = 'unit_test_phantom'", "None, headers, False)]) class TestSlackOutput(object): \"\"\"Test class for PagerDutyOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup", "outputs.STREAM_OUTPUTS.values(): props = output(REGION, FUNCTION_NAME, CONFIG).get_user_defined_properties() # The user defined properties should at", "= 'bad-output' outputs.get_output_dispatcher(bad_service, REGION, FUNCTION_NAME, CONFIG) log_mock.assert_called_with( 'designated output service [%s] does not", "disable=protected-access from collections import Counter, OrderedDict import json import boto3 from mock import", "@mock_lambda @patch('logging.Logger.info') def test_dispatch(self, log_mock): \"\"\"LambdaOutput dispatch\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert)", "cls.__dispatcher = None def _setup_dispatch(self, url): \"\"\"Helper for setting up PhantomOutput dispatch\"\"\" remove_temp_secrets()", "rule_name='rule_name', alert=alert) log_mock.assert_called_with('Failed to send alert to %s', self.__service) @patch('logging.Logger.error') def test_dispatch_bad_descriptor(self, log_error_mock):", "request_mock): \"\"\"PhantomOutput - Container Query URL\"\"\" alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) full_url", "assert_equal(len(result), 2) assert_equal(result[0], '*[1]* test_value_01') assert_equal(result[1], '*[2]* test_value_02') def test_json_map_to_text(self): \"\"\"JSON map to", "2.0 (the \"License\"); you may not use this file except in compliance with", "return value the second time # it is called. 
This allows testing issues", "def test_nonexistent_get_output_dispatcher(): \"\"\"Get output dispatcher - nonexistent\"\"\" nonexistent_service = 'aws-s4' dispatcher = outputs.get_output_dispatcher(nonexistent_service,", "- PagerDuty\"\"\" props = self.__dispatcher._get_default_properties() assert_equal(len(props), 1) assert_equal(props['url'], 'https://events.pagerduty.com/generic/2010-04-15/create_event.json') def _setup_dispatch(self): \"\"\"Helper for", "url_mock, log_mock): \"\"\"PhantomOutput dispatch failure (artifact)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.read.side_effect = ['', '{\"id\":", "to %s', self.__service) @mock_lambda @patch('logging.Logger.info') def test_dispatch_with_qualifier(self, log_mock): \"\"\"LambdaOutput dispatch with qualifier\"\"\" alt_descriptor", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "setting up S3Output dispatch\"\"\" bucket = CONFIG[self.__service][self.__descriptor] boto3.client('s3', region_name=REGION).create_bucket(Bucket=bucket) return get_alert() @patch('logging.Logger.info') @mock_s3", "before any methods\"\"\" cls.__service = 'pagerduty' cls.__descriptor = 'unit_test_pagerduty' cls.__backup_method = None cls.__dispatcher", "def test_dispatch_new_container(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch success, new container\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value", "to %s', self.__service) @patch('logging.Logger.info') @patch('urllib2.urlopen') def test_dispatch_new_container(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch success, new", "etc', 'bucket.value')} formatted_config = self.__dispatcher.format_output_config(CONFIG, props) assert_equal(len(formatted_config), 2) assert_is_not_none(formatted_config.get('descriptor_value')) assert_is_not_none(formatted_config.get('unit_test_bucket')) def test_dispatch(self): \"\"\"AWSOutput", "self.__dispatcher._json_to_slack_mrkdwn(simple_dict, 0) assert_equal(len(result), 2) assert_equal(result[1], '*test_key_02:* test_value_02') def test_json_to_slack_mrkdwn_nested_dict(self): \"\"\"JSON to Slack mrkdwn", "= 'this\\nis\\nnot\\njson' self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) response = str( call('An error occurred while decoding", "= self.__dispatcher.dispatch() assert_is_none(passed) class TestS3Ouput(object): \"\"\"Test class for S3Output\"\"\" @classmethod def setup_class(cls): \"\"\"Setup", "all methods\"\"\" cls.dispatcher = None def test_locals(self): \"\"\"S3Output local variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__, 'S3Output') assert_equal(self.__dispatcher.__service__,", "self.__service) @patch('logging.Logger.error') def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"PhantomOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor='bad_descriptor',", "bad descriptor\"\"\" alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed to send alert to", "to Slack mrkdwn - simple str\"\"\" simple_str = 'value to format' result =", "url_mock.return_value.read.side_effect = ['{\"count\": 1, \"data\": [{\"id\": 1948}]}'] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully 
sent alert", "log_error_mock): \"\"\"PhantomOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed to", "log_mock): \"\"\"PhantomOutput dispatch success, new container\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.side_effect", "rule_name='rule_name', alert=alert) log_mock.assert_called_with('Failed to send alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_container_error(self,", "= frozenset() cls.__dispatcher = outputs.AWSOutput(REGION, FUNCTION_NAME, CONFIG) cls.__dispatcher.__service__ = 'aws-s3' @classmethod def teardown_class(cls):", "'root_value_02'), ('root_nested_01', OrderedDict([ ('nested_key_01', 100), ('nested_key_02', 200), ('nested_nested_01', OrderedDict([ ('nested_nested_key_01', [ 6161, 1051,", "TestPagerDutyOutput(object): \"\"\"Test class for PagerDutyOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any", "cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown the class after", "the class before any methods\"\"\" cls.__service = 'pagerduty' cls.__descriptor = 'unit_test_pagerduty' cls.__backup_method =", "alert=alert) log_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.info') @patch('urllib2.urlopen') def test_dispatch_new_container(self, url_mock, log_mock):", "put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return get_alert() def _teardown_dispatch(self): \"\"\"Replace method with cached", "class before any methods\"\"\" cls.__service = 'phantom' cls.__descriptor = 'unit_test_phantom' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service,", "result = self.__dispatcher._json_to_slack_mrkdwn(simple_str, 0) assert_equal(len(result), 1) assert_equal(result[0], simple_str) def test_json_to_slack_mrkdwn_dict(self): \"\"\"JSON to Slack", "at a minimum contain a descriptor assert_is_not_none(props.get('descriptor')) class TestPagerDutyOutput(object): \"\"\"Test class for PagerDutyOutput\"\"\"", "user defined properties should at a minimum contain a descriptor assert_is_not_none(props.get('descriptor')) class TestPagerDutyOutput(object):", "assert_equal(result[2], '*root_nested_01:*') assert_equal(Counter(result[4])['\\t'], 1) assert_equal(Counter(result[6])['\\t'], 2) def test_json_to_slack_mrkdwn_list(self): \"\"\"JSON to Slack mrkdwn -", "assert_equal(self.__dispatcher.__service__, self.__service) def _setup_dispatch(self, alt_descriptor=''): \"\"\"Helper for setting up LambdaOutput dispatch\"\"\" function_name =", "url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.side_effect = ['{\"count\": 1, \"data\": [{\"id\": 1948}]}'] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert)", "self.__dispatcher.output_cred_name(self.__descriptor) creds = {'url': 'http://pagerduty.foo.bar/create_event.json', 'service_key': 'mocked_service_key'} put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return", "= 200 url_mock.return_value.read.side_effect = ['{\"count\": 1, \"data\": [{\"id\": 1948}]}'] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', 
alert=alert) log_mock.assert_called_with('Successfully", "url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.side_effect = ['{\"count\": 0, \"data\": []}', '{\"id\": 1948}'] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name',", "failure (artifact)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.read.side_effect = ['', '{\"id\": 1902}'] # Use side_effect", "error (setup container)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.return_value = 'this\\nis\\nnot\\njson' self.__dispatcher.dispatch(descriptor=self.__descriptor,", "to send alert to %s', self.__service) @mock_s3 @mock_kms class TestPhantomOutput(object): \"\"\"Test class for", "after all methods\"\"\" cls.dispatcher = None def test_locals(self): \"\"\"S3Output local variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__, 'S3Output')", "'*[1]* test_value_01') assert_equal(result[1], '*[2]* test_value_02') def test_json_to_slack_mrkdwn_multi_nested(self): \"\"\"JSON to Slack mrkdwn - multi", "to text\"\"\" simple_list = ['test_value_01', 'test_value_02'] result = self.__dispatcher._json_list_to_text(simple_list, '\\t', 0) assert_equal(len(result), 2)", "rule_name = 'test_rule_multi-part' alert = get_random_alert(30, rule_name) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests", "dispatch failure\"\"\" alert = self._setup_dispatch() bad_message = '{\"error\": {\"message\": \"failed\", \"errors\": [\"err1\", \"err2\"]}}'", "decode error (setup container)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.return_value = 'this\\nis\\nnot\\njson'", "- simple dict\"\"\" simple_dict = OrderedDict([('test_key_01', 'test_value_01'), ('test_key_02', 'test_value_02')]) result = self.__dispatcher._json_to_slack_mrkdwn(simple_dict, 0)", "= self._setup_dispatch('phantom.foo.bar') url_mock.return_value.read.side_effect = ['', '{\"id\": 1902}'] # Use side_effect to change the", "= None def _setup_dispatch(self, url): \"\"\"Helper for setting up PhantomOutput dispatch\"\"\" remove_temp_secrets() output_name", "or self.__descriptor] create_lambda_function(function_name, REGION) return get_alert() @mock_lambda @patch('logging.Logger.info') def test_dispatch(self, log_mock): \"\"\"LambdaOutput dispatch\"\"\"", "dispatch decode error (setup container)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.return_value =", "self.__backup_method @patch('logging.Logger.info') @patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_success(self, url_mock, log_info_mock): \"\"\"PagerDutyOutput dispatch success\"\"\" alert", "test_dispatch(self): \"\"\"AWSOutput dispatch pass\"\"\" passed = self.__dispatcher.dispatch() assert_is_none(passed) class TestS3Ouput(object): \"\"\"Test class for", "region_name=REGION).create_bucket(Bucket=bucket) return get_alert() @patch('logging.Logger.info') @mock_s3 def test_dispatch(self, log_mock): \"\"\"S3Output dispatch\"\"\" alert = self._setup_dispatch()", "passed = self.__dispatcher.dispatch() assert_is_none(passed) class TestS3Ouput(object): \"\"\"Test class for S3Output\"\"\" @classmethod def setup_class(cls):", "\"\"\"Setup the class before any methods\"\"\" # pylint: disable=abstract-class-instantiated cls.__abstractmethods_cache = 
outputs.AWSOutput.__abstractmethods__ outputs.AWSOutput.__abstractmethods__", "class before any methods\"\"\" cls.__service = 'pagerduty' cls.__descriptor = 'unit_test_pagerduty' cls.__backup_method = None", "setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service = 'pagerduty' cls.__descriptor = 'unit_test_pagerduty'", "test_dispatch_failure(self, url_mock, log_error_mock): \"\"\"SlackOutput dispatch failure\"\"\" alert = self._setup_dispatch() error_message = 'a helpful", "@mock_kms def test_dispatch_success(self, url_mock, log_info_mock): \"\"\"PagerDutyOutput dispatch success\"\"\" alert = self._setup_dispatch() url_mock.return_value.getcode.return_value =", "test_dispatch_success(self, url_mock, log_info_mock): \"\"\"SlackOutput dispatch success\"\"\" alert = self._setup_dispatch() url_mock.return_value.getcode.return_value = 200 self.__dispatcher.dispatch(descriptor=self.__descriptor,", "= lambda: None output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds = {'url': 'http://pagerduty.foo.bar/create_event.json', 'service_key': 'mocked_service_key'} put_mock_creds(output_name,", "\"\"\" Copyright 2017-present, Airbnb Inc. Licensed under the Apache License, Version 2.0 (the", "self.__service) def _setup_dispatch(self): \"\"\"Helper for setting up S3Output dispatch\"\"\" bucket = CONFIG[self.__service][self.__descriptor] boto3.client('s3',", "headers = {'ph-auth-token': 'mocked_auth_token'} request_mock.assert_has_calls([call(full_url, None, headers, False)]) class TestSlackOutput(object): \"\"\"Test class for", "_setup_dispatch(self): \"\"\"Helper for setting up PagerDutyOutput dispatch\"\"\" remove_temp_secrets() # Cache the _get_default_properties and", "simple_list = ['test_value_01', 'test_value_02'] result = self.__dispatcher._json_to_slack_mrkdwn(simple_list, 0) assert_equal(len(result), 2) assert_equal(result[0], '*[1]* test_value_01')", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "assert_equal(result[0], '*[1]* test_value_01') assert_equal(result[1], '*[2]* test_value_02') def test_json_map_to_text(self): \"\"\"JSON map to text\"\"\" simple_dict", "from collections import Counter, OrderedDict import json import boto3 from mock import call,", "url_mock, log_mock): \"\"\"PhantomOutput dispatch success, new container\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200", "after all methods\"\"\" cls.__dispatcher = None def _setup_dispatch(self, url): \"\"\"Helper for setting up", "'unit_test_channel' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown the class", "self._teardown_dispatch() log_error_mock.assert_called_with('Failed to send alert to %s', self.__service) @patch('logging.Logger.error') @mock_s3 @mock_kms def test_dispatch_bad_descriptor(self,", "for setting up LambdaOutput dispatch\"\"\" function_name = CONFIG[self.__service][alt_descriptor or self.__descriptor] create_lambda_function(function_name, REGION) return", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "\"\"\"LambdaOutput dispatch with qualifier\"\"\" alt_descriptor = '{}_qual'.format(self.__descriptor) alert = self._setup_dispatch(alt_descriptor) self.__dispatcher.dispatch(descriptor=alt_descriptor, rule_name='rule_name', alert=alert)", "chain somewhere url_mock.return_value.getcode.side_effect = [200, 400] 
self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Failed to send alert", "cls.__service = 'phantom' cls.__descriptor = 'unit_test_phantom' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod", "url_mock, log_error_mock): \"\"\"SlackOutput dispatch failure\"\"\" alert = self._setup_dispatch() error_message = 'a helpful error", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing,", "class TestLambdaOuput(object): \"\"\"Test class for LambdaOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "the class before any methods\"\"\" cls.__service = 'aws-lambda' cls.__descriptor = 'unit_test_lambda' cls.__dispatcher =", "assert_equal(len(result), 2) assert_equal(result[1], '*test_key_02:* test_value_02') def _setup_dispatch(self): \"\"\"Helper for setting up SlackOutput dispatch\"\"\"", "= 'test_empty_rule_description' alert = get_random_alert(10, rule_name, True) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests", "json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'}) assert_equal( loaded_message['text'], '*StreamAlert Rule Triggered:", "\"\"\"PhantomOutput dispatch decode error (setup container)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.return_value", "rule_name, True) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests default_rule_description = '*Rule Description:*\\nNo rule", "def test_dispatch_success(self, url_mock, log_info_mock): \"\"\"SlackOutput dispatch success\"\"\" alert = self._setup_dispatch() url_mock.return_value.getcode.return_value = 200", "variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__, 'LambdaOutput') assert_equal(self.__dispatcher.__service__, self.__service) def _setup_dispatch(self, alt_descriptor=''): \"\"\"Helper for setting up LambdaOutput", "@patch('logging.Logger.info') def test_dispatch_with_qualifier(self, log_mock): \"\"\"LambdaOutput dispatch with qualifier\"\"\" alt_descriptor = '{}_qual'.format(self.__descriptor) alert =", "teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" cls.dispatcher = None def test_locals(self): \"\"\"LambdaOutput", "to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_container_error(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch decode error", "def test_dispatch_failure(self, url_mock, log_error_mock): \"\"\"SlackOutput dispatch failure\"\"\" alert = self._setup_dispatch() error_message = 'a", "\"\"\"PagerDutyOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) self._teardown_dispatch() log_error_mock.assert_called_with('Failed to", "%s', ValueError( 'No JSON object could be decoded',))) assert_equal(str(log_mock.call_args_list[0]), response) @patch('logging.Logger.error') @patch('urllib2.urlopen') def", "\"\"\"PagerDutyOutput dispatch success\"\"\" alert = self._setup_dispatch() url_mock.return_value.getcode.return_value = 200 self.__dispatcher.dispatch(descriptor=self.__descriptor, 
rule_name='rule_name', alert=alert) self._teardown_dispatch()", "\"\"\"PagerDutyOutput dispatch failure\"\"\" alert = self._setup_dispatch() bad_message = '{\"error\": {\"message\": \"failed\", \"errors\": [\"err1\",", "@patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_failure(self, url_mock, log_error_mock): \"\"\"PagerDutyOutput dispatch failure\"\"\" alert = self._setup_dispatch()", "get_random_alert(10, rule_name, True) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests default_rule_description = '*Rule Description:*\\nNo", "'\\t\\t\\t*[3]* 51919') def test_json_list_to_text(self): \"\"\"JSON list to text\"\"\" simple_list = ['test_value_01', 'test_value_02'] result", "{'url': 'http://pagerduty.foo.bar/create_event.json', 'service_key': 'mocked_service_key'} put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return get_alert() def _teardown_dispatch(self):", "return get_alert() def _teardown_dispatch(self): \"\"\"Replace method with cached method\"\"\" self.__dispatcher._get_default_properties = self.__backup_method @patch('logging.Logger.info')", "'unit_test_lambda' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown the class", "\"\"\"Test class for S3Output\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\"", "'test_value_02'] result = self.__dispatcher._json_list_to_text(simple_list, '\\t', 0) assert_equal(len(result), 2) assert_equal(result[0], '*[1]* test_value_01') assert_equal(result[1], '*[2]*", "('nested_nested_01', OrderedDict([ ('nested_nested_key_01', 300) ])) ])) ]) result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0) assert_equal(len(result), 7)", "self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_container_failure(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch failure (setup container)\"\"\" alert", "%s', self.__service) @mock_lambda @patch('logging.Logger.info') def test_dispatch_with_qualifier(self, log_mock): \"\"\"LambdaOutput dispatch with qualifier\"\"\" alt_descriptor =", "def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" # pylint: disable=abstract-class-instantiated cls.__abstractmethods_cache =", "self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) self._teardown_dispatch() log_error_mock.assert_called_with('Failed to send alert to %s', self.__service) @mock_s3", "CONFIG, FUNCTION_NAME, KMS_ALIAS, REGION from tests.unit.stream_alert_alert_processor.helpers import ( get_random_alert, get_alert, remove_temp_secrets ) def", "Triggered: test_rule_single*') assert_equal(len(loaded_message['attachments']), 1) def test_format_message_mutliple(self): \"\"\"Format Multi-Message - Slack\"\"\" rule_name = 'test_rule_multi-part'", "the chain somewhere url_mock.return_value.getcode.side_effect = [200, 400] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Failed to send", "\"\"\"Format Single Message - Slack\"\"\" rule_name = 'test_rule_single' alert = get_random_alert(25, rule_name) loaded_message", "def test_aws_format_output_config(self): \"\"\"AWSOutput format output config\"\"\" props = { 'descriptor': OutputProperty( 'short_descriptor', 'descriptor_value'),", "@mock_s3 @mock_kms def test_dispatch_bad_descriptor(self, log_error_mock): 
\"\"\"SlackOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor',", "methods\"\"\" cls.dispatcher = None def test_locals(self): \"\"\"S3Output local variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__, 'S3Output') assert_equal(self.__dispatcher.__service__, self.__service)", "'test_rule_multi-part' alert = get_random_alert(30, rule_name) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests assert_set_equal(set(loaded_message.keys()), {'text',", "assert_equal(Counter(result[6])['\\t'], 2) def test_json_to_slack_mrkdwn_list(self): \"\"\"JSON to Slack mrkdwn - simple list\"\"\" simple_list =", "= error_message url_mock.return_value.getcode.return_value = 400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_error_mock.assert_any_call('Encountered an error while sending", "alert = self._setup_dispatch() error_message = 'a helpful error message' url_mock.return_value.read.return_value = error_message url_mock.return_value.getcode.return_value", "%s', self.__service) @patch('logging.Logger.error') def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"PhantomOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch('phantom.foo.bar')", "import ( get_random_alert, get_alert, remove_temp_secrets ) def test_existing_get_output_dispatcher(): \"\"\"Get output dispatcher - existing\"\"\"", "%s', self.__service) class TestAWSOutput(object): \"\"\"Test class for AWSOutput Base\"\"\" @classmethod def setup_class(cls): \"\"\"Setup", "the class before any methods\"\"\" cls.__service = 'slack' cls.__descriptor = 'unit_test_channel' cls.__dispatcher =", "service [%s] does not exist', bad_service) def test_user_defined_properties(): \"\"\"Get user defined properties\"\"\" for", "CONFIG[self.__service][self.__descriptor] boto3.client('s3', region_name=REGION).create_bucket(Bucket=bucket) return get_alert() @patch('logging.Logger.info') @mock_s3 def test_dispatch(self, log_mock): \"\"\"S3Output dispatch\"\"\" alert", "log_info_mock): \"\"\"SlackOutput dispatch success\"\"\" alert = self._setup_dispatch() url_mock.return_value.getcode.return_value = 200 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert)", "OrderedDict([('test_key_01', 'test_value_01'), ('test_key_02', 'test_value_02')]) result = self.__dispatcher._json_map_to_text(simple_dict, '\\t', 0) assert_equal(len(result), 2) assert_equal(result[1], '*test_key_02:*", "\"\"\"Helper for setting up PhantomOutput dispatch\"\"\" remove_temp_secrets() output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds = {'url':", "setting up LambdaOutput dispatch\"\"\" function_name = CONFIG[self.__service][alt_descriptor or self.__descriptor] create_lambda_function(function_name, REGION) return get_alert()", "cls.__backup_method = None cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown", "permissions and limitations under the License. 
\"\"\" # pylint: disable=protected-access from collections import", "@classmethod def teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" cls.dispatcher = None def", "{'text', 'mrkdwn', 'attachments'}) assert_equal( loaded_message['text'], '*StreamAlert Rule Triggered: test_rule_single*') assert_equal(len(loaded_message['attachments']), 1) def test_format_message_mutliple(self):", "test_value_02') def test_json_map_to_text(self): \"\"\"JSON map to text\"\"\" simple_dict = OrderedDict([('test_key_01', 'test_value_01'), ('test_key_02', 'test_value_02')])", "test_value_02') def test_json_to_slack_mrkdwn_nested_dict(self): \"\"\"JSON to Slack mrkdwn - nested dict\"\"\" nested_dict = OrderedDict([", "\"\"\"PhantomOutput - Container Query URL\"\"\" alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) full_url =", "- existing\"\"\" service = 'aws-s3' dispatcher = outputs.get_output_dispatcher( service, REGION, FUNCTION_NAME, CONFIG) assert_is_not_none(dispatcher)", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "nose.tools import ( assert_equal, assert_is_none, assert_is_not_none, assert_set_equal ) from stream_alert.alert_processor import outputs from", "1902}'] # Use side_effect to change the getcode return value the second time", "test_dispatch_success(self, url_mock, log_info_mock): \"\"\"PagerDutyOutput dispatch success\"\"\" alert = self._setup_dispatch() url_mock.return_value.getcode.return_value = 200 self.__dispatcher.dispatch(descriptor=self.__descriptor,", "rule_name='rule_name', alert=alert) log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') @mock_s3 @mock_kms def", "6161, 1051, 51919 ]) ])) ])) ]) result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0) assert_equal(len(result), 10)", "self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed to send alert to %s', self.__service) class TestAWSOutput(object): \"\"\"Test", "dispatch\"\"\" bucket = CONFIG[self.__service][self.__descriptor] boto3.client('s3', region_name=REGION).create_bucket(Bucket=bucket) return get_alert() @patch('logging.Logger.info') @mock_s3 def test_dispatch(self, log_mock):", "= outputs.get_output_dispatcher( service, REGION, FUNCTION_NAME, CONFIG) assert_is_not_none(dispatcher) def test_nonexistent_get_output_dispatcher(): \"\"\"Get output dispatcher -", "\"\"\"Get Default Properties - PagerDuty\"\"\" props = self.__dispatcher._get_default_properties() assert_equal(len(props), 1) assert_equal(props['url'], 'https://events.pagerduty.com/generic/2010-04-15/create_event.json') def", "test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"PhantomOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed", "called. 
This allows testing issues down the chain somewhere url_mock.return_value.getcode.side_effect = [200, 400]", "# tests default_rule_description = '*Rule Description:*\\nNo rule description provided\\n' assert_equal( loaded_message['attachments'][0]['pretext'], default_rule_description) def", "def test_json_list_to_text(self): \"\"\"JSON list to text\"\"\" simple_list = ['test_value_01', 'test_value_02'] result = self.__dispatcher._json_list_to_text(simple_list,", "= OrderedDict([('test_key_01', 'test_value_01'), ('test_key_02', 'test_value_02')]) result = self.__dispatcher._json_to_slack_mrkdwn(simple_dict, 0) assert_equal(len(result), 2) assert_equal(result[1], '*test_key_02:*", "up SlackOutput dispatch\"\"\" remove_temp_secrets() output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds = {'url': 'https://api.slack.com/web-hook-key'} put_mock_creds(output_name, creds,", "@patch('logging.Logger.error') @mock_s3 @mock_kms def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"SlackOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch()", "for PagerDutyOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service =", "cls.__service = 'pagerduty' cls.__descriptor = 'unit_test_pagerduty' cls.__backup_method = None cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION,", "test_nonexistent_get_output_dispatcher(): \"\"\"Get output dispatcher - nonexistent\"\"\" nonexistent_service = 'aws-s4' dispatcher = outputs.get_output_dispatcher(nonexistent_service, REGION,", "after all methods\"\"\" cls.__dispatcher = None def test_format_message_single(self): \"\"\"Format Single Message - Slack\"\"\"", "['test_value_01', 'test_value_02'] result = self.__dispatcher._json_to_slack_mrkdwn(simple_list, 0) assert_equal(len(result), 2) assert_equal(result[0], '*[1]* test_value_01') assert_equal(result[1], '*[2]*", "import create_lambda_function, put_mock_creds from tests.unit.stream_alert_alert_processor import CONFIG, FUNCTION_NAME, KMS_ALIAS, REGION from tests.unit.stream_alert_alert_processor.helpers import", "LambdaOutput dispatch\"\"\" function_name = CONFIG[self.__service][alt_descriptor or self.__descriptor] create_lambda_function(function_name, REGION) return get_alert() @mock_lambda @patch('logging.Logger.info')", "Default Rule Description - Slack\"\"\" rule_name = 'test_empty_rule_description' alert = get_random_alert(10, rule_name, True)", "None def test_get_default_properties(self): \"\"\"Get Default Properties - PagerDuty\"\"\" props = self.__dispatcher._get_default_properties() assert_equal(len(props), 1)", "container)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Failed to send", "= self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.side_effect = ['{\"count\": 1, \"data\": [{\"id\": 1948}]}'] self.__dispatcher.dispatch(descriptor=self.__descriptor,", "alt_descriptor=''): \"\"\"Helper for setting up LambdaOutput dispatch\"\"\" function_name = CONFIG[self.__service][alt_descriptor or self.__descriptor] create_lambda_function(function_name,", "- Slack\"\"\" rule_name = 'test_empty_rule_description' alert = get_random_alert(10, rule_name, True) loaded_message = json.loads(self.__dispatcher._format_message(rule_name,", "to %s', self.__service) class 
TestLambdaOuput(object): \"\"\"Test class for LambdaOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup", "rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed to send alert to %s', self.__service) class TestAWSOutput(object): \"\"\"Test class", "Unless required by applicable law or agreed to in writing, software distributed under", "str\"\"\" simple_str = 'value to format' result = self.__dispatcher._json_to_slack_mrkdwn(simple_str, 0) assert_equal(len(result), 1) assert_equal(result[0],", "'service_key': 'mocked_service_key'} put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return get_alert() def _teardown_dispatch(self): \"\"\"Replace method", "self._setup_dispatch() url_mock.return_value.getcode.return_value = 200 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) self._teardown_dispatch() log_info_mock.assert_called_with('Successfully sent alert to %s',", "'aws_value': OutputProperty( 'unique arn value, bucket, etc', 'bucket.value')} formatted_config = self.__dispatcher.format_output_config(CONFIG, props) assert_equal(len(formatted_config),", "= 'value to format' result = self.__dispatcher._json_to_slack_mrkdwn(simple_str, 0) assert_equal(len(result), 1) assert_equal(result[0], simple_str) def", "= 'unit_test_lambda' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown the", "FUNCTION_NAME, CONFIG) cls.__dispatcher.__service__ = 'aws-s3' @classmethod def teardown_class(cls): \"\"\"Teardown the class after all", "\"\"\"Format Multi-Message - Slack\"\"\" rule_name = 'test_rule_multi-part' alert = get_random_alert(30, rule_name) loaded_message =", "for LambdaOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service =", "\"\"\"S3Output dispatch\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent alert to %s',", "the class before any methods\"\"\" cls.__service = 'phantom' cls.__descriptor = 'unit_test_phantom' cls.__dispatcher =", "class after all methods\"\"\" cls.__dispatcher = None def test_get_default_properties(self): \"\"\"Get Default Properties -", "text\"\"\" simple_dict = OrderedDict([('test_key_01', 'test_value_01'), ('test_key_02', 'test_value_02')]) result = self.__dispatcher._json_map_to_text(simple_dict, '\\t', 0) assert_equal(len(result),", "OrderedDict([ ('nested_nested_key_01', 300) ])) ])) ]) result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0) assert_equal(len(result), 7) assert_equal(result[2],", "test_json_to_slack_mrkdwn_multi_nested(self): \"\"\"JSON to Slack mrkdwn - multi type nested\"\"\" nested_dict = OrderedDict([ ('root_key_01',", "self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_error_mock.assert_any_call('Encountered an error while sending to Slack: %s', error_message) log_error_mock.assert_any_call('Failed", "assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'}) assert_equal( loaded_message['text'], '*StreamAlert Rule Triggered: test_rule_single*') assert_equal(len(loaded_message['attachments']), 1) def", "user defined properties\"\"\" for output in outputs.STREAM_OUTPUTS.values(): props = output(REGION, FUNCTION_NAME, CONFIG).get_user_defined_properties() #", "[%s] does not exist', bad_service) 
def test_user_defined_properties(): \"\"\"Get user defined properties\"\"\" for output", "could be decoded',))) assert_equal(str(log_mock.call_args_list[0]), response) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_failure(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch", "variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__, 'S3Output') assert_equal(self.__dispatcher.__service__, self.__service) def _setup_dispatch(self): \"\"\"Helper for setting up S3Output dispatch\"\"\"", "def test_dispatch_failure(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch failure (artifact)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.read.side_effect =", "= get_random_alert(25, rule_name) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'})", "self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') @mock_s3 @mock_kms", "the class after all methods\"\"\" cls.__dispatcher = None def test_get_default_properties(self): \"\"\"Get Default Properties", "\"\"\"Test class for PagerDutyOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\"", "alert=alert) log_error_mock.assert_any_call('Encountered an error while sending to Slack: %s', error_message) log_error_mock.assert_any_call('Failed to send", "( get_random_alert, get_alert, remove_temp_secrets ) def test_existing_get_output_dispatcher(): \"\"\"Get output dispatcher - existing\"\"\" service", "local variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__, 'S3Output') assert_equal(self.__dispatcher.__service__, self.__service) def _setup_dispatch(self): \"\"\"Helper for setting up S3Output", "get_alert() @patch('logging.Logger.info') @patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_success(self, url_mock, log_info_mock): \"\"\"SlackOutput dispatch success\"\"\" alert", "software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "= outputs.AWSOutput.__abstractmethods__ outputs.AWSOutput.__abstractmethods__ = frozenset() cls.__dispatcher = outputs.AWSOutput(REGION, FUNCTION_NAME, CONFIG) cls.__dispatcher.__service__ = 'aws-s3'", "descriptor assert_is_not_none(props.get('descriptor')) class TestPagerDutyOutput(object): \"\"\"Test class for PagerDutyOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the", "moto import mock_s3, mock_kms, mock_lambda from nose.tools import ( assert_equal, assert_is_none, assert_is_not_none, assert_set_equal", "'https://api.slack.com/web-hook-key'} put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return get_alert() @patch('logging.Logger.info') @patch('urllib2.urlopen') @mock_s3 @mock_kms def", "log_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_container_failure(self, url_mock, log_mock): \"\"\"PhantomOutput", "setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service = 'slack' cls.__descriptor = 'unit_test_channel'", "alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_failure(self, url_mock, log_error_mock): \"\"\"SlackOutput", 
"outputs.get_output_dispatcher(bad_service, REGION, FUNCTION_NAME, CONFIG) log_mock.assert_called_with( 'designated output service [%s] does not exist', bad_service)", "self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Failed to send alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') def", "= None cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown the", "class after all methods\"\"\" outputs.AWSOutput.__abstractmethods__ = cls.__abstractmethods_cache cls.__dispatcher = None def test_aws_format_output_config(self): \"\"\"AWSOutput", "self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0) assert_equal(len(result), 10) assert_equal(result[2], '*root_nested_01:*') assert_equal(Counter(result[4])['\\t'], 1) assert_equal(result[-1], '\\t\\t\\t*[3]* 51919') def test_json_list_to_text(self):", "rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.info') @patch('urllib2.urlopen') def test_dispatch_new_container(self, url_mock,", "url_mock.return_value.read.return_value = 'this\\nis\\nnot\\njson' self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) response = str( call('An error occurred while", "def test_json_to_slack_mrkdwn_nested_dict(self): \"\"\"JSON to Slack mrkdwn - nested dict\"\"\" nested_dict = OrderedDict([ ('root_key_01',", "type nested\"\"\" nested_dict = OrderedDict([ ('root_key_01', 'root_value_01'), ('root_02', 'root_value_02'), ('root_nested_01', OrderedDict([ ('nested_key_01', 100),", "assert_equal( loaded_message['text'], '*StreamAlert Rule Triggered: test_rule_single*') assert_equal(len(loaded_message['attachments']), 1) def test_format_message_mutliple(self): \"\"\"Format Multi-Message -", "@patch('logging.Logger.error') @patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_failure(self, url_mock, log_error_mock): \"\"\"SlackOutput dispatch failure\"\"\" alert =", "class for PagerDutyOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service", "alert to %s', self.__service) class TestLambdaOuput(object): \"\"\"Test class for LambdaOutput\"\"\" @classmethod def setup_class(cls):", "in writing, software distributed under the License is distributed on an \"AS IS\"", "= 200 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) self._teardown_dispatch() log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.error')", "= 200 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen')", "(artifact)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.read.side_effect = ['', '{\"id\": 1902}'] # Use side_effect to", "testing issues down the chain somewhere url_mock.return_value.getcode.side_effect = [200, 400] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert)", "'{}_qual'.format(self.__descriptor) alert = self._setup_dispatch(alt_descriptor) self.__dispatcher.dispatch(descriptor=alt_descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent alert to 
%s', self.__service)", "test_dispatch_container_query(self, request_mock): \"\"\"PhantomOutput - Container Query URL\"\"\" alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert)", "or agreed to in writing, software distributed under the License is distributed on", "test_value_01') assert_equal(result[1], '*[2]* test_value_02') def test_json_map_to_text(self): \"\"\"JSON map to text\"\"\" simple_dict = OrderedDict([('test_key_01',", "'<PASSWORD>'} put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return get_alert() @patch('logging.Logger.info') @patch('urllib2.urlopen') def test_dispatch_existing_container(self, url_mock,", "for PhantomOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service =", "= output(REGION, FUNCTION_NAME, CONFIG).get_user_defined_properties() # The user defined properties should at a minimum", "up PagerDutyOutput dispatch\"\"\" remove_temp_secrets() # Cache the _get_default_properties and set it to return", "function_name = CONFIG[self.__service][alt_descriptor or self.__descriptor] create_lambda_function(function_name, REGION) return get_alert() @mock_lambda @patch('logging.Logger.info') def test_dispatch(self,", "the _get_default_properties and set it to return None self.__backup_method = self.__dispatcher._get_default_properties self.__dispatcher._get_default_properties =", "CONFIG) log_mock.assert_called_with( 'designated output service [%s] does not exist', bad_service) def test_user_defined_properties(): \"\"\"Get", "self._teardown_dispatch() log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_failure(self,", "assert_equal(result[2], '*root_nested_01:*') assert_equal(Counter(result[4])['\\t'], 1) assert_equal(result[-1], '\\t\\t\\t*[3]* 51919') def test_json_list_to_text(self): \"\"\"JSON list to text\"\"\"", "success\"\"\" alert = self._setup_dispatch() url_mock.return_value.getcode.return_value = 200 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_info_mock.assert_called_with('Successfully sent alert", "from moto import mock_s3, mock_kms, mock_lambda from nose.tools import ( assert_equal, assert_is_none, assert_is_not_none,", "local variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__, 'LambdaOutput') assert_equal(self.__dispatcher.__service__, self.__service) def _setup_dispatch(self, alt_descriptor=''): \"\"\"Helper for setting up", "remove_temp_secrets() # Cache the _get_default_properties and set it to return None self.__backup_method =", "self.__service) @patch('logging.Logger.error') @mock_s3 @mock_kms def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"PagerDutyOutput dispatch bad descriptor\"\"\" alert =", "= 'pagerduty' cls.__descriptor = 'unit_test_pagerduty' cls.__backup_method = None cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME,", "OrderedDict([ ('root_key_01', 'root_value_01'), ('root_02', 'root_value_02'), ('root_nested_01', OrderedDict([ ('nested_key_01', 100), ('nested_key_02', 200), ('nested_nested_01', OrderedDict([", "OrderedDict([ ('nested_nested_key_01', [ 6161, 1051, 51919 ]) ])) ])) ]) result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict,", "OutputProperty( 'short_descriptor', 'descriptor_value'), 'aws_value': 
OutputProperty( 'unique arn value, bucket, etc', 'bucket.value')} formatted_config =", "- log error\"\"\" bad_service = 'bad-output' outputs.get_output_dispatcher(bad_service, REGION, FUNCTION_NAME, CONFIG) log_mock.assert_called_with( 'designated output", "\"\"\"Teardown the class after all methods\"\"\" cls.__dispatcher = None def _setup_dispatch(self, url): \"\"\"Helper", "get_alert() @patch('logging.Logger.info') @mock_s3 def test_dispatch(self, log_mock): \"\"\"S3Output dispatch\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name',", "from tests.unit.stream_alert_alert_processor import CONFIG, FUNCTION_NAME, KMS_ALIAS, REGION from tests.unit.stream_alert_alert_processor.helpers import ( get_random_alert, get_alert,", "\"\"\"Replace method with cached method\"\"\" self.__dispatcher._get_default_properties = self.__backup_method @patch('logging.Logger.info') @patch('urllib2.urlopen') @mock_s3 @mock_kms def", "assert_equal(result[0], '*[1]* test_value_01') assert_equal(result[1], '*[2]* test_value_02') def test_json_to_slack_mrkdwn_multi_nested(self): \"\"\"JSON to Slack mrkdwn -", "def test_get_output_dispatcher_logging(log_mock): \"\"\"Get output dispatcher - log error\"\"\" bad_service = 'bad-output' outputs.get_output_dispatcher(bad_service, REGION,", "dispatch\"\"\" function_name = CONFIG[self.__service][alt_descriptor or self.__descriptor] create_lambda_function(function_name, REGION) return get_alert() @mock_lambda @patch('logging.Logger.info') def", "cls.__dispatcher = None def test_aws_format_output_config(self): \"\"\"AWSOutput format output config\"\"\" props = { 'descriptor':", "class TestPagerDutyOutput(object): \"\"\"Test class for PagerDutyOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before", "def test_json_to_slack_mrkdwn_dict(self): \"\"\"JSON to Slack mrkdwn - simple dict\"\"\" simple_dict = OrderedDict([('test_key_01', 'test_value_01'),", "= None def test_get_default_properties(self): \"\"\"Get Default Properties - PagerDuty\"\"\" props = self.__dispatcher._get_default_properties() assert_equal(len(props),", "'pagerduty' cls.__descriptor = 'unit_test_pagerduty' cls.__backup_method = None cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG)", "10) assert_equal(result[2], '*root_nested_01:*') assert_equal(Counter(result[4])['\\t'], 1) assert_equal(result[-1], '\\t\\t\\t*[3]* 51919') def test_json_list_to_text(self): \"\"\"JSON list to", "alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.return_value = 'this\\nis\\nnot\\njson' self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) response", "def test_locals(self): \"\"\"S3Output local variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__, 'S3Output') assert_equal(self.__dispatcher.__service__, self.__service) def _setup_dispatch(self): \"\"\"Helper for", "CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" cls.dispatcher = None", "'test_value_02')]) result = self.__dispatcher._json_map_to_text(simple_dict, '\\t', 0) assert_equal(len(result), 2) assert_equal(result[1], '*test_key_02:* test_value_02') def _setup_dispatch(self):", "= self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.side_effect = ['{\"count\": 0, \"data\": []}', '{\"id\": 1948}']", 
"self.__dispatcher.output_cred_name(self.__descriptor) creds = {'url': 'https://api.slack.com/web-hook-key'} put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return get_alert() @patch('logging.Logger.info')", "@patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_failure(self, url_mock, log_error_mock): \"\"\"SlackOutput dispatch failure\"\"\" alert = self._setup_dispatch()", "self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.return_value = 'this\\nis\\nnot\\njson' self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) response = str(", "from stream_alert_cli.helpers import create_lambda_function, put_mock_creds from tests.unit.stream_alert_alert_processor import CONFIG, FUNCTION_NAME, KMS_ALIAS, REGION from", "('root_nested_01', OrderedDict([ ('nested_key_01', 100), ('nested_key_02', 200), ('nested_nested_01', OrderedDict([ ('nested_nested_key_01', [ 6161, 1051, 51919", "(setup container)\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Failed to", "# pylint: disable=abstract-class-instantiated cls.__abstractmethods_cache = outputs.AWSOutput.__abstractmethods__ outputs.AWSOutput.__abstractmethods__ = frozenset() cls.__dispatcher = outputs.AWSOutput(REGION, FUNCTION_NAME,", "minimum contain a descriptor assert_is_not_none(props.get('descriptor')) class TestPagerDutyOutput(object): \"\"\"Test class for PagerDutyOutput\"\"\" @classmethod def", "alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) full_url = 'phantom.foo.bar/rest/container?_filter_name=\"rule_name\"&page_size=1' headers = {'ph-auth-token': 'mocked_auth_token'}", "create_lambda_function(function_name, REGION) return get_alert() @mock_lambda @patch('logging.Logger.info') def test_dispatch(self, log_mock): \"\"\"LambdaOutput dispatch\"\"\" alert =", "ValueError( 'No JSON object could be decoded',))) assert_equal(str(log_mock.call_args_list[0]), response) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_failure(self,", "properties should at a minimum contain a descriptor assert_is_not_none(props.get('descriptor')) class TestPagerDutyOutput(object): \"\"\"Test class", "'descriptor': OutputProperty( 'short_descriptor', 'descriptor_value'), 'aws_value': OutputProperty( 'unique arn value, bucket, etc', 'bucket.value')} formatted_config", "all methods\"\"\" cls.__dispatcher = None def test_format_message_single(self): \"\"\"Format Single Message - Slack\"\"\" rule_name", "def teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" cls.__dispatcher = None def test_get_default_properties(self):", "from tests.unit.stream_alert_alert_processor.helpers import ( get_random_alert, get_alert, remove_temp_secrets ) def test_existing_get_output_dispatcher(): \"\"\"Get output dispatcher", "2) assert_equal(result[1], '*test_key_02:* test_value_02') def _setup_dispatch(self): \"\"\"Helper for setting up SlackOutput dispatch\"\"\" remove_temp_secrets()", "= self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) self._teardown_dispatch() log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)", "@mock_kms def 
test_dispatch_failure(self, url_mock, log_error_mock): \"\"\"PagerDutyOutput dispatch failure\"\"\" alert = self._setup_dispatch() bad_message =", "to JSON: %s', ValueError( 'No JSON object could be decoded',))) assert_equal(str(log_mock.call_args_list[0]), response) @patch('logging.Logger.error')", "= ['test_value_01', 'test_value_02'] result = self.__dispatcher._json_list_to_text(simple_list, '\\t', 0) assert_equal(len(result), 2) assert_equal(result[0], '*[1]* test_value_01')", "this file except in compliance with the License. You may obtain a copy", "'mrkdwn', 'attachments'}) assert_equal( loaded_message['text'], '*StreamAlert Rule Triggered: test_rule_single*') assert_equal(len(loaded_message['attachments']), 1) def test_format_message_mutliple(self): \"\"\"Format", "assert_is_not_none, assert_set_equal ) from stream_alert.alert_processor import outputs from stream_alert.alert_processor.output_base import OutputProperty from stream_alert_cli.helpers", "you may not use this file except in compliance with the License. You", "('root_02', 'root_value_02'), ('root_nested_01', OrderedDict([ ('nested_key_01', 100), ('nested_key_02', 200), ('nested_nested_01', OrderedDict([ ('nested_nested_key_01', 300) ]))", "response to JSON: %s', ValueError( 'No JSON object could be decoded',))) assert_equal(str(log_mock.call_args_list[0]), response)", ") def test_existing_get_output_dispatcher(): \"\"\"Get output dispatcher - existing\"\"\" service = 'aws-s3' dispatcher =", "dispatch bad descriptor\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) self._teardown_dispatch() log_error_mock.assert_called_with('Failed to send", "allows testing issues down the chain somewhere url_mock.return_value.getcode.side_effect = [200, 400] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name',", "output service [%s] does not exist', bad_service) def test_user_defined_properties(): \"\"\"Get user defined properties\"\"\"", "is called. 
This allows testing issues down the chain somewhere url_mock.return_value.getcode.side_effect = [200,", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "url_mock.return_value.getcode.return_value = 400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Failed to send alert to %s', self.__service)", "FUNCTION_NAME, CONFIG) assert_is_none(dispatcher) @patch('logging.Logger.error') def test_get_output_dispatcher_logging(log_mock): \"\"\"Get output dispatcher - log error\"\"\" bad_service", "message' url_mock.return_value.read.return_value = error_message url_mock.return_value.getcode.return_value = 400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_error_mock.assert_any_call('Encountered an error", "log_error_mock.assert_any_call('Failed to send alert to %s', self.__service) @patch('logging.Logger.error') @mock_s3 @mock_kms def test_dispatch_bad_descriptor(self, log_error_mock):", "Slack mrkdwn - nested dict\"\"\" nested_dict = OrderedDict([ ('root_key_01', 'root_value_01'), ('root_02', 'root_value_02'), ('root_nested_01',", "test_json_to_slack_mrkdwn_nested_dict(self): \"\"\"JSON to Slack mrkdwn - nested dict\"\"\" nested_dict = OrderedDict([ ('root_key_01', 'root_value_01'),", "dispatch with qualifier\"\"\" alt_descriptor = '{}_qual'.format(self.__descriptor) alert = self._setup_dispatch(alt_descriptor) self.__dispatcher.dispatch(descriptor=alt_descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully", "('root_nested_01', OrderedDict([ ('nested_key_01', 100), ('nested_key_02', 200), ('nested_nested_01', OrderedDict([ ('nested_nested_key_01', 300) ])) ])) ])", "self.__dispatcher._get_default_properties self.__dispatcher._get_default_properties = lambda: None output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds = {'url': 'http://pagerduty.foo.bar/create_event.json', 'service_key':", "FUNCTION_NAME, KMS_ALIAS, REGION from tests.unit.stream_alert_alert_processor.helpers import ( get_random_alert, get_alert, remove_temp_secrets ) def test_existing_get_output_dispatcher():", "None def test_locals(self): \"\"\"LambdaOutput local variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__, 'LambdaOutput') assert_equal(self.__dispatcher.__service__, self.__service) def _setup_dispatch(self, alt_descriptor=''):", "\"\"\"Setup the class before any methods\"\"\" cls.__service = 'aws-s3' cls.__descriptor = 'unit_test_bucket' cls.__dispatcher", "methods\"\"\" cls.__service = 'pagerduty' cls.__descriptor = 'unit_test_pagerduty' cls.__backup_method = None cls.__dispatcher = outputs.get_output_dispatcher(cls.__service,", "url_mock, log_mock): \"\"\"PhantomOutput dispatch success, existing container\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200", "method\"\"\" self.__dispatcher._get_default_properties = self.__backup_method @patch('logging.Logger.info') @patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_success(self, url_mock, log_info_mock): \"\"\"PagerDutyOutput", "\"\"\" # pylint: disable=protected-access from collections import Counter, OrderedDict import json import boto3", "= 'test_rule_single' alert = get_random_alert(25, rule_name) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests assert_set_equal(set(loaded_message.keys()),", "\"\"\"Test class for 
AWSOutput Base\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any", "])) ]) result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0) assert_equal(len(result), 10) assert_equal(result[2], '*root_nested_01:*') assert_equal(Counter(result[4])['\\t'], 1) assert_equal(result[-1],", "self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed to send alert to %s', self.__service) class TestAWSOutput(object):", "descriptor\"\"\" alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed to send alert to %s',", "setting up PagerDutyOutput dispatch\"\"\" remove_temp_secrets() # Cache the _get_default_properties and set it to", "@mock_s3 @mock_kms def test_dispatch_failure(self, url_mock, log_error_mock): \"\"\"SlackOutput dispatch failure\"\"\" alert = self._setup_dispatch() error_message", "test_json_list_to_text(self): \"\"\"JSON list to text\"\"\" simple_list = ['test_value_01', 'test_value_02'] result = self.__dispatcher._json_list_to_text(simple_list, '\\t',", "lambda: None output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds = {'url': 'http://pagerduty.foo.bar/create_event.json', 'service_key': 'mocked_service_key'} put_mock_creds(output_name, creds,", "= self._setup_dispatch() bad_message = '{\"error\": {\"message\": \"failed\", \"errors\": [\"err1\", \"err2\"]}}' url_mock.return_value.read.return_value = bad_message", "up S3Output dispatch\"\"\" bucket = CONFIG[self.__service][self.__descriptor] boto3.client('s3', region_name=REGION).create_bucket(Bucket=bucket) return get_alert() @patch('logging.Logger.info') @mock_s3 def", "to send alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_container_error(self, url_mock, log_mock): \"\"\"PhantomOutput", "methods\"\"\" cls.__service = 'aws-s3' cls.__descriptor = 'unit_test_bucket' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG)", "= bad_message url_mock.return_value.getcode.return_value = 400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) self._teardown_dispatch() log_error_mock.assert_called_with('Failed to send alert", "side_effect to change the getcode return value the second time # it is", "'test_value_01'), ('test_key_02', 'test_value_02')]) result = self.__dispatcher._json_to_slack_mrkdwn(simple_dict, 0) assert_equal(len(result), 2) assert_equal(result[1], '*test_key_02:* test_value_02') def", "@classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" # pylint: disable=abstract-class-instantiated cls.__abstractmethods_cache", "url_mock.return_value.getcode.return_value = 400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_error_mock.assert_any_call('Encountered an error while sending to Slack:", "simple dict\"\"\" simple_dict = OrderedDict([('test_key_01', 'test_value_01'), ('test_key_02', 'test_value_02')]) result = self.__dispatcher._json_to_slack_mrkdwn(simple_dict, 0) assert_equal(len(result),", "get_random_alert, get_alert, remove_temp_secrets ) def test_existing_get_output_dispatcher(): \"\"\"Get output dispatcher - existing\"\"\" service =", "and set it to return None self.__backup_method = self.__dispatcher._get_default_properties 
self.__dispatcher._get_default_properties = lambda: None", "= self.__dispatcher._json_list_to_text(simple_list, '\\t', 0) assert_equal(len(result), 2) assert_equal(result[0], '*[1]* test_value_01') assert_equal(result[1], '*[2]* test_value_02') def", "assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'}) assert_equal( loaded_message['text'], '*StreamAlert Rule Triggered: test_rule_multi-part*') assert_equal(len(loaded_message['attachments']), 2) assert_equal(loaded_message['attachments'][1]", "stream_alert.alert_processor import outputs from stream_alert.alert_processor.output_base import OutputProperty from stream_alert_cli.helpers import create_lambda_function, put_mock_creds from", "alert = get_random_alert(25, rule_name) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn',", "rule_name = 'test_rule_single' alert = get_random_alert(25, rule_name) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests", "['test_value_01', 'test_value_02'] result = self.__dispatcher._json_list_to_text(simple_list, '\\t', 0) assert_equal(len(result), 2) assert_equal(result[0], '*[1]* test_value_01') assert_equal(result[1],", "\"err2\"]}}' url_mock.return_value.read.return_value = bad_message url_mock.return_value.getcode.return_value = 400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) self._teardown_dispatch() log_error_mock.assert_called_with('Failed to", "outputs.AWSOutput.__abstractmethods__ = cls.__abstractmethods_cache cls.__dispatcher = None def test_aws_format_output_config(self): \"\"\"AWSOutput format output config\"\"\" props", "result = self.__dispatcher._json_to_slack_mrkdwn(simple_dict, 0) assert_equal(len(result), 2) assert_equal(result[1], '*test_key_02:* test_value_02') def test_json_to_slack_mrkdwn_nested_dict(self): \"\"\"JSON to", "'mrkdwn', 'attachments'}) assert_equal( loaded_message['text'], '*StreamAlert Rule Triggered: test_rule_multi-part*') assert_equal(len(loaded_message['attachments']), 2) assert_equal(loaded_message['attachments'][1] ['text'].split('\\n')[3][1:7], '000028')", "\"\"\"S3Output local variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__, 'S3Output') assert_equal(self.__dispatcher.__service__, self.__service) def _setup_dispatch(self): \"\"\"Helper for setting up", "Single Message - Slack\"\"\" rule_name = 'test_rule_single' alert = get_random_alert(25, rule_name) loaded_message =", "%s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_container_failure(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch failure (setup container)\"\"\"", "def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service = 'pagerduty' cls.__descriptor =", "str( call('An error occurred while decoding ' 'Phantom container query response to JSON:", "\"\"\"Get user defined properties\"\"\" for output in outputs.STREAM_OUTPUTS.values(): props = output(REGION, FUNCTION_NAME, CONFIG).get_user_defined_properties()", "sent alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_container_failure(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch", "error while sending to Slack: %s', error_message) log_error_mock.assert_any_call('Failed to send alert to %s',", "('nested_nested_key_01', [ 6161, 1051, 51919 ]) ])) ])) ]) result = 
self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0)", "the class after all methods\"\"\" cls.dispatcher = None def test_locals(self): \"\"\"LambdaOutput local variables\"\"\"", "setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service = 'aws-s3' cls.__descriptor = 'unit_test_bucket'", "def _setup_dispatch(self): \"\"\"Helper for setting up PagerDutyOutput dispatch\"\"\" remove_temp_secrets() # Cache the _get_default_properties", "'*StreamAlert Rule Triggered: test_rule_multi-part*') assert_equal(len(loaded_message['attachments']), 2) assert_equal(loaded_message['attachments'][1] ['text'].split('\\n')[3][1:7], '000028') def test_format_message_default_rule_description(self): \"\"\"Format Message", "file except in compliance with the License. You may obtain a copy of", "@classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service = 'pagerduty' cls.__descriptor", "@mock_s3 @mock_kms def test_dispatch_success(self, url_mock, log_info_mock): \"\"\"PagerDutyOutput dispatch success\"\"\" alert = self._setup_dispatch() url_mock.return_value.getcode.return_value", "]) result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0) assert_equal(len(result), 10) assert_equal(result[2], '*root_nested_01:*') assert_equal(Counter(result[4])['\\t'], 1) assert_equal(result[-1], '\\t\\t\\t*[3]*", "class for PhantomOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service", "Use side_effect to change the getcode return value the second time # it", "it to return None self.__backup_method = self.__dispatcher._get_default_properties self.__dispatcher._get_default_properties = lambda: None output_name =", "'*StreamAlert Rule Triggered: test_rule_single*') assert_equal(len(loaded_message['attachments']), 1) def test_format_message_mutliple(self): \"\"\"Format Multi-Message - Slack\"\"\" rule_name", "1948}]}'] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.info') @patch('urllib2.urlopen') def", "cls.__service = 'aws-s3' cls.__descriptor = 'unit_test_bucket' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod", "CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" cls.__dispatcher = None", "to %s', self.__service) @patch('logging.Logger.error') @mock_s3 @mock_kms def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"SlackOutput dispatch bad descriptor\"\"\"", "test_dispatch_new_container(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch success, new container\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value =", "rule_name='rule_name', alert=alert) full_url = 'phantom.foo.bar/rest/container?_filter_name=\"rule_name\"&page_size=1' headers = {'ph-auth-token': 'mocked_auth_token'} request_mock.assert_has_calls([call(full_url, None, headers, False)])", "\"\"\"Teardown the class after all methods\"\"\" cls.dispatcher = None def test_locals(self): \"\"\"S3Output local", "S3Output dispatch\"\"\" bucket = CONFIG[self.__service][self.__descriptor] boto3.client('s3', region_name=REGION).create_bucket(Bucket=bucket) return get_alert() @patch('logging.Logger.info') @mock_s3 def test_dispatch(self,", "service = 'aws-s3' dispatcher = outputs.get_output_dispatcher( service, REGION, FUNCTION_NAME, CONFIG) 
assert_is_not_none(dispatcher) def test_nonexistent_get_output_dispatcher():", "sent alert to %s', self.__service) @patch('logging.Logger.info') @patch('urllib2.urlopen') def test_dispatch_new_container(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch", "alert = self._setup_dispatch() url_mock.return_value.getcode.return_value = 200 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_info_mock.assert_called_with('Successfully sent alert to", "should at a minimum contain a descriptor assert_is_not_none(props.get('descriptor')) class TestPagerDutyOutput(object): \"\"\"Test class for", "'{\"id\": 1948}'] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen')", "cls.__descriptor = 'unit_test_pagerduty' cls.__backup_method = None cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod", "rule_name='rule_name', alert=alert) self._teardown_dispatch() log_error_mock.assert_called_with('Failed to send alert to %s', self.__service) @mock_s3 @mock_kms class", "def test_existing_get_output_dispatcher(): \"\"\"Get output dispatcher - existing\"\"\" service = 'aws-s3' dispatcher = outputs.get_output_dispatcher(", "url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.return_value = 'this\\nis\\nnot\\njson' self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) response = str( call('An", "list\"\"\" simple_list = ['test_value_01', 'test_value_02'] result = self.__dispatcher._json_to_slack_mrkdwn(simple_list, 0) assert_equal(len(result), 2) assert_equal(result[0], '*[1]*", "@mock_s3 @mock_kms class TestPhantomOutput(object): \"\"\"Test class for PhantomOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the", "OrderedDict([ ('nested_key_01', 100), ('nested_key_02', 200), ('nested_nested_01', OrderedDict([ ('nested_nested_key_01', [ 6161, 1051, 51919 ])", "assert_equal(self.__dispatcher.__class__.__name__, 'LambdaOutput') assert_equal(self.__dispatcher.__service__, self.__service) def _setup_dispatch(self, alt_descriptor=''): \"\"\"Helper for setting up LambdaOutput dispatch\"\"\"", "assert_equal(len(result), 1) assert_equal(result[0], simple_str) def test_json_to_slack_mrkdwn_dict(self): \"\"\"JSON to Slack mrkdwn - simple dict\"\"\"", "\"\"\"LambdaOutput local variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__, 'LambdaOutput') assert_equal(self.__dispatcher.__service__, self.__service) def _setup_dispatch(self, alt_descriptor=''): \"\"\"Helper for setting", "methods\"\"\" cls.__service = 'aws-lambda' cls.__descriptor = 'unit_test_lambda' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG)", "rule_name='rule_name', alert=alert) response = str( call('An error occurred while decoding ' 'Phantom container", "self.__service) class TestLambdaOuput(object): \"\"\"Test class for LambdaOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class", "before any methods\"\"\" cls.__service = 'aws-lambda' cls.__descriptor = 'unit_test_lambda' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION,", "self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.side_effect = ['{\"count\": 1, \"data\": [{\"id\": 1948}]}'] 
self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name',", "def teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" cls.__dispatcher = None def _setup_dispatch(self,", "law or agreed to in writing, software distributed under the License is distributed", "self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) self._teardown_dispatch() log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') @mock_s3", "Version 2.0 (the \"License\"); you may not use this file except in compliance", "'value to format' result = self.__dispatcher._json_to_slack_mrkdwn(simple_str, 0) assert_equal(len(result), 1) assert_equal(result[0], simple_str) def test_json_to_slack_mrkdwn_dict(self):", "{ 'descriptor': OutputProperty( 'short_descriptor', 'descriptor_value'), 'aws_value': OutputProperty( 'unique arn value, bucket, etc', 'bucket.value')}", "= {'ph-auth-token': 'mocked_auth_token'} request_mock.assert_has_calls([call(full_url, None, headers, False)]) class TestSlackOutput(object): \"\"\"Test class for PagerDutyOutput\"\"\"", "assert_equal(result[1], '*test_key_02:* test_value_02') def _setup_dispatch(self): \"\"\"Helper for setting up SlackOutput dispatch\"\"\" remove_temp_secrets() output_name", "language governing permissions and limitations under the License. \"\"\" # pylint: disable=protected-access from", "assert_is_not_none(dispatcher) def test_nonexistent_get_output_dispatcher(): \"\"\"Get output dispatcher - nonexistent\"\"\" nonexistent_service = 'aws-s4' dispatcher =", "2017-present, Airbnb Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you", "under the Apache License, Version 2.0 (the \"License\"); you may not use this", "issues down the chain somewhere url_mock.return_value.getcode.side_effect = [200, 400] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Failed", "S3Output\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service = 'aws-s3'", "dispatch bad descriptor\"\"\" alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed to send alert", "= get_random_alert(30, rule_name) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'})", "alert)) # tests default_rule_description = '*Rule Description:*\\nNo rule description provided\\n' assert_equal( loaded_message['attachments'][0]['pretext'], default_rule_description)", "container\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.side_effect = ['{\"count\": 0, \"data\": []}',", "tests default_rule_description = '*Rule Description:*\\nNo rule description provided\\n' assert_equal( loaded_message['attachments'][0]['pretext'], default_rule_description) def test_json_to_slack_mrkdwn_str(self):", "2) assert_equal(result[0], '*[1]* test_value_01') assert_equal(result[1], '*[2]* test_value_02') def test_json_map_to_text(self): \"\"\"JSON map to text\"\"\"", "REGION from tests.unit.stream_alert_alert_processor.helpers import ( get_random_alert, get_alert, remove_temp_secrets ) def test_existing_get_output_dispatcher(): \"\"\"Get output", 
"put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return get_alert() @patch('logging.Logger.info') @patch('urllib2.urlopen') def test_dispatch_existing_container(self, url_mock, log_mock):", "or implied. See the License for the specific language governing permissions and limitations", "cls.__dispatcher = None def test_get_default_properties(self): \"\"\"Get Default Properties - PagerDuty\"\"\" props = self.__dispatcher._get_default_properties()", "@patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_success(self, url_mock, log_info_mock): \"\"\"SlackOutput dispatch success\"\"\" alert = self._setup_dispatch()", "send alert to %s', self.__service) class TestAWSOutput(object): \"\"\"Test class for AWSOutput Base\"\"\" @classmethod", "assert_equal(len(props), 1) assert_equal(props['url'], 'https://events.pagerduty.com/generic/2010-04-15/create_event.json') def _setup_dispatch(self): \"\"\"Helper for setting up PagerDutyOutput dispatch\"\"\" remove_temp_secrets()", "def teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" cls.__dispatcher = None def test_format_message_single(self):", "assert_equal(result[-1], '\\t\\t\\t*[3]* 51919') def test_json_list_to_text(self): \"\"\"JSON list to text\"\"\" simple_list = ['test_value_01', 'test_value_02']", "CONFIG).get_user_defined_properties() # The user defined properties should at a minimum contain a descriptor", "log_info_mock): \"\"\"PagerDutyOutput dispatch success\"\"\" alert = self._setup_dispatch() url_mock.return_value.getcode.return_value = 200 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert)", "test_rule_multi-part*') assert_equal(len(loaded_message['attachments']), 2) assert_equal(loaded_message['attachments'][1] ['text'].split('\\n')[3][1:7], '000028') def test_format_message_default_rule_description(self): \"\"\"Format Message Default Rule Description", "simple_dict = OrderedDict([('test_key_01', 'test_value_01'), ('test_key_02', 'test_value_02')]) result = self.__dispatcher._json_map_to_text(simple_dict, '\\t', 0) assert_equal(len(result), 2)", "self.__service) @patch('stream_alert.alert_processor.output_base.StreamOutputBase._request_helper') def test_dispatch_container_query(self, request_mock): \"\"\"PhantomOutput - Container Query URL\"\"\" alert = self._setup_dispatch('phantom.foo.bar')", "CONDITIONS OF ANY KIND, either express or implied. 
"""
Copyright 2017-present, Airbnb Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=protected-access
from collections import Counter, OrderedDict
import json

import boto3
from mock import call, patch
from moto import mock_s3, mock_kms, mock_lambda
from nose.tools import (
    assert_equal,
    assert_is_none,
    assert_is_not_none,
    assert_set_equal
)

from stream_alert.alert_processor import outputs
from stream_alert.alert_processor.output_base import OutputProperty
from stream_alert_cli.helpers import create_lambda_function, put_mock_creds
from tests.unit.stream_alert_alert_processor import (
    CONFIG,
    FUNCTION_NAME,
    KMS_ALIAS,
    REGION
)
from tests.unit.stream_alert_alert_processor.helpers import (
    get_random_alert,
    get_alert,
    remove_temp_secrets
)


def test_existing_get_output_dispatcher():
    """Get output dispatcher - existing"""
    service = 'aws-s3'
    dispatcher = outputs.get_output_dispatcher(
        service, REGION, FUNCTION_NAME, CONFIG)
    assert_is_not_none(dispatcher)


def test_nonexistent_get_output_dispatcher():
    """Get output dispatcher - nonexistent"""
    nonexistent_service = 'aws-s4'
    dispatcher = outputs.get_output_dispatcher(
        nonexistent_service, REGION, FUNCTION_NAME, CONFIG)
    assert_is_none(dispatcher)


@patch('logging.Logger.error')
def test_get_output_dispatcher_logging(log_mock):
    """Get output dispatcher - log error"""
    bad_service = 'bad-output'
    outputs.get_output_dispatcher(bad_service, REGION, FUNCTION_NAME, CONFIG)
    log_mock.assert_called_with(
        'designated output service [%s] does not exist', bad_service)


def test_user_defined_properties():
    """Get user defined properties"""
    for output in outputs.STREAM_OUTPUTS.values():
        props = output(REGION, FUNCTION_NAME, CONFIG).get_user_defined_properties()
        # The user defined properties should at a minimum contain a descriptor
        assert_is_not_none(props.get('descriptor'))
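

# ---------------------------------------------------------------------------
# Editorial sketch (hedged, not collected by nose): the lookup-then-dispatch
# flow the tests above exercise, written out end to end. It reuses only names
# already imported in this module; the 'aws-s3' service and 'unit_test_bucket'
# descriptor are assumptions about the shape of the unit-test CONFIG fixture.
# ---------------------------------------------------------------------------
def _example_dispatch_flow():
    """Sketch of how a dispatcher is resolved and then invoked."""
    dispatcher = outputs.get_output_dispatcher(
        'aws-s3', REGION, FUNCTION_NAME, CONFIG)
    if dispatcher is None:
        # Unknown services resolve to None (see test_nonexistent_get_output_dispatcher)
        return
    dispatcher.dispatch(descriptor='unit_test_bucket',
                        rule_name='example_rule',
                        alert=get_alert())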


class TestPagerDutyOutput(object):
    """Test class for PagerDutyOutput"""
    @classmethod
    def setup_class(cls):
        """Setup the class before any methods"""
        cls.__service = 'pagerduty'
        cls.__descriptor = 'unit_test_pagerduty'
        cls.__backup_method = None
        cls.__dispatcher = outputs.get_output_dispatcher(
            cls.__service, REGION, FUNCTION_NAME, CONFIG)

    @classmethod
    def teardown_class(cls):
        """Teardown the class after all methods"""
        cls.__dispatcher = None

    def test_get_default_properties(self):
        """Get Default Properties - PagerDuty"""
        props = self.__dispatcher._get_default_properties()
        assert_equal(len(props), 1)
        assert_equal(props['url'],
                     'https://events.pagerduty.com/generic/2010-04-15/create_event.json')

    def _setup_dispatch(self):
        """Helper for setting up PagerDutyOutput dispatch"""
        remove_temp_secrets()

        # Cache the _get_default_properties and set it to return None
        self.__backup_method = self.__dispatcher._get_default_properties
        self.__dispatcher._get_default_properties = lambda: None

        output_name = self.__dispatcher.output_cred_name(self.__descriptor)

        creds = {'url': 'http://pagerduty.foo.bar/create_event.json',
                 'service_key': 'mocked_service_key'}

        put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket,
                       REGION, KMS_ALIAS)

        return get_alert()

    def _teardown_dispatch(self):
        """Replace method with cached method"""
        self.__dispatcher._get_default_properties = self.__backup_method

    @patch('logging.Logger.info')
    @patch('urllib2.urlopen')
    @mock_s3
    @mock_kms
    def test_dispatch_success(self, url_mock, log_info_mock):
        """PagerDutyOutput dispatch success"""
        alert = self._setup_dispatch()
        url_mock.return_value.getcode.return_value = 200

        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        self._teardown_dispatch()

        log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service)

    @patch('logging.Logger.error')
    @patch('urllib2.urlopen')
    @mock_s3
    @mock_kms
    def test_dispatch_failure(self, url_mock, log_error_mock):
        """PagerDutyOutput dispatch failure"""
        alert = self._setup_dispatch()
        bad_message = '{"error": {"message": "failed", "errors": ["err1", "err2"]}}'
        url_mock.return_value.read.return_value = bad_message
        url_mock.return_value.getcode.return_value = 400

        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        self._teardown_dispatch()

        log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)

    @patch('logging.Logger.error')
    @mock_s3
    @mock_kms
    def test_dispatch_bad_descriptor(self, log_error_mock):
        """PagerDutyOutput dispatch bad descriptor"""
        alert = self._setup_dispatch()
        self.__dispatcher.dispatch(descriptor='bad_descriptor',
                                   rule_name='rule_name',
                                   alert=alert)

        self._teardown_dispatch()

        log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)
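

# Note (editorial, hedged): the _setup_dispatch helpers above and below share
# one pattern -- remove_temp_secrets() clears any cached credentials, then
# put_mock_creds() encrypts a fake creds dict under the mocked KMS alias and
# drops it in the dispatcher's secrets bucket, so dispatch() can fetch and
# decrypt credentials exactly as it would against real AWS.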


@mock_s3
@mock_kms
class TestPhantomOutput(object):
    """Test class for PhantomOutput"""
    @classmethod
    def setup_class(cls):
        """Setup the class before any methods"""
        cls.__service = 'phantom'
        cls.__descriptor = 'unit_test_phantom'
        cls.__dispatcher = outputs.get_output_dispatcher(
            cls.__service, REGION, FUNCTION_NAME, CONFIG)

    @classmethod
    def teardown_class(cls):
        """Teardown the class after all methods"""
        cls.__dispatcher = None

    def _setup_dispatch(self, url):
        """Helper for setting up PhantomOutput dispatch"""
        remove_temp_secrets()

        output_name = self.__dispatcher.output_cred_name(self.__descriptor)

        # The container query test below asserts this exact token in the
        # 'ph-auth-token' header, so the credentials must match it here
        creds = {'url': url, 'ph_auth_token': 'mocked_auth_token'}

        put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket,
                       REGION, KMS_ALIAS)

        return get_alert()

    @patch('logging.Logger.info')
    @patch('urllib2.urlopen')
    def test_dispatch_existing_container(self, url_mock, log_mock):
        """PhantomOutput dispatch success, existing container"""
        alert = self._setup_dispatch('phantom.foo.bar')
        url_mock.return_value.getcode.return_value = 200
        url_mock.return_value.read.side_effect = ['{"count": 1, "data": [{"id": 1948}]}']

        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        log_mock.assert_called_with('Successfully sent alert to %s', self.__service)

    @patch('logging.Logger.info')
    @patch('urllib2.urlopen')
    def test_dispatch_new_container(self, url_mock, log_mock):
        """PhantomOutput dispatch success, new container"""
        alert = self._setup_dispatch('phantom.foo.bar')
        url_mock.return_value.getcode.return_value = 200
        url_mock.return_value.read.side_effect = ['{"count": 0, "data": []}', '{"id": 1948}']

        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        log_mock.assert_called_with('Successfully sent alert to %s', self.__service)

    @patch('logging.Logger.error')
    @patch('urllib2.urlopen')
    def test_dispatch_container_failure(self, url_mock, log_mock):
        """PhantomOutput dispatch failure (setup container)"""
        alert = self._setup_dispatch('phantom.foo.bar')
        url_mock.return_value.getcode.return_value = 400

        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        log_mock.assert_called_with('Failed to send alert to %s', self.__service)

    @patch('logging.Logger.error')
    @patch('urllib2.urlopen')
    def test_dispatch_container_error(self, url_mock, log_mock):
        """PhantomOutput dispatch decode error (setup container)"""
        alert = self._setup_dispatch('phantom.foo.bar')
        url_mock.return_value.getcode.return_value = 200
        url_mock.return_value.read.return_value = 'this\nis\nnot\njson'

        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        response = str(
            call('An error occurred while decoding '
                 'Phantom container query response to JSON: %s',
                 ValueError('No JSON object could be decoded',)))

        assert_equal(str(log_mock.call_args_list[0]), response)

    @patch('logging.Logger.error')
    @patch('urllib2.urlopen')
    def test_dispatch_failure(self, url_mock, log_mock):
        """PhantomOutput dispatch failure (artifact)"""
        alert = self._setup_dispatch('phantom.foo.bar')
        url_mock.return_value.read.side_effect = ['', '{"id": 1902}']
        # Use side_effect to change the getcode return value the second time
        # it is called. This allows testing issues down the chain somewhere
        url_mock.return_value.getcode.side_effect = [200, 400]

        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        log_mock.assert_called_with('Failed to send alert to %s', self.__service)

    @patch('logging.Logger.error')
    def test_dispatch_bad_descriptor(self, log_error_mock):
        """PhantomOutput dispatch bad descriptor"""
        alert = self._setup_dispatch('phantom.foo.bar')
        self.__dispatcher.dispatch(descriptor='bad_descriptor',
                                   rule_name='rule_name',
                                   alert=alert)

        log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)

    @patch('stream_alert.alert_processor.output_base.StreamOutputBase._request_helper')
    def test_dispatch_container_query(self, request_mock):
        """PhantomOutput - Container Query URL"""
        alert = self._setup_dispatch('phantom.foo.bar')
        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        full_url = 'phantom.foo.bar/rest/container?_filter_name="rule_name"&page_size=1'
        headers = {'ph-auth-token': 'mocked_auth_token'}
        request_mock.assert_has_calls([call(full_url, None, headers, False)])
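

# ---------------------------------------------------------------------------
# Editorial sketch (hedged, not collected by nose) of the mock side_effect
# pattern used in test_dispatch_failure above: assigning a list to side_effect
# makes each successive call return the next element, which is how the test
# fails the request only on the second HTTP round-trip. Uses only mock.Mock,
# already a dependency of this module.
# ---------------------------------------------------------------------------
def _example_side_effect_sequencing():
    """Sketch: Mock.side_effect hands out one queued value per call."""
    from mock import Mock

    response = Mock()
    response.getcode.side_effect = [200, 400]
    assert response.getcode() == 200  # first call: container setup succeeds
    assert response.getcode() == 400  # second call: artifact request fails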


class TestSlackOutput(object):
    """Test class for SlackOutput"""
    @classmethod
    def setup_class(cls):
        """Setup the class before any methods"""
        cls.__service = 'slack'
        cls.__descriptor = 'unit_test_channel'
        cls.__dispatcher = outputs.get_output_dispatcher(
            cls.__service, REGION, FUNCTION_NAME, CONFIG)

    @classmethod
    def teardown_class(cls):
        """Teardown the class after all methods"""
        cls.__dispatcher = None

    def test_format_message_single(self):
        """Format Single Message - Slack"""
        rule_name = 'test_rule_single'
        alert = get_random_alert(25, rule_name)
        loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert))

        # tests
        assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'})
        assert_equal(
            loaded_message['text'],
            '*StreamAlert Rule Triggered: test_rule_single*')
        assert_equal(len(loaded_message['attachments']), 1)

    def test_format_message_multiple(self):
        """Format Multi-Message - Slack"""
        rule_name = 'test_rule_multi-part'
        alert = get_random_alert(30, rule_name)
        loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert))

        # tests
        assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'})
        assert_equal(
            loaded_message['text'],
            '*StreamAlert Rule Triggered: test_rule_multi-part*')
        assert_equal(len(loaded_message['attachments']), 2)
        assert_equal(loaded_message['attachments'][1]['text'].split('\n')[3][1:7], '000028')

    def test_format_message_default_rule_description(self):
        """Format Message Default Rule Description - Slack"""
        rule_name = 'test_empty_rule_description'
        alert = get_random_alert(10, rule_name, True)
        loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert))

        # tests
        default_rule_description = '*Rule Description:*\nNo rule description provided\n'
        assert_equal(loaded_message['attachments'][0]['pretext'], default_rule_description)

    def test_json_to_slack_mrkdwn_str(self):
        """JSON to Slack mrkdwn - simple str"""
        simple_str = 'value to format'
        result = self.__dispatcher._json_to_slack_mrkdwn(simple_str, 0)

        assert_equal(len(result), 1)
        assert_equal(result[0], simple_str)

    def test_json_to_slack_mrkdwn_dict(self):
        """JSON to Slack mrkdwn - simple dict"""
        simple_dict = OrderedDict([('test_key_01', 'test_value_01'),
                                   ('test_key_02', 'test_value_02')])
        result = self.__dispatcher._json_to_slack_mrkdwn(simple_dict, 0)

        assert_equal(len(result), 2)
        assert_equal(result[1], '*test_key_02:* test_value_02')

    def test_json_to_slack_mrkdwn_nested_dict(self):
        """JSON to Slack mrkdwn - nested dict"""
        nested_dict = OrderedDict([
            ('root_key_01', 'root_value_01'),
            ('root_02', 'root_value_02'),
            ('root_nested_01', OrderedDict([
                ('nested_key_01', 100),
                ('nested_key_02', 200),
                ('nested_nested_01', OrderedDict([
                    ('nested_nested_key_01', 300)
                ]))
            ]))
        ])
        result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0)
        assert_equal(len(result), 7)
        assert_equal(result[2], '*root_nested_01:*')
        assert_equal(Counter(result[4])['\t'], 1)
        assert_equal(Counter(result[6])['\t'], 2)

    def test_json_to_slack_mrkdwn_list(self):
        """JSON to Slack mrkdwn - simple list"""
        simple_list = ['test_value_01', 'test_value_02']
        result = self.__dispatcher._json_to_slack_mrkdwn(simple_list, 0)

        assert_equal(len(result), 2)
        assert_equal(result[0], '*[1]* test_value_01')
        assert_equal(result[1], '*[2]* test_value_02')

    def test_json_to_slack_mrkdwn_multi_nested(self):
        """JSON to Slack mrkdwn - multi type nested"""
        nested_dict = OrderedDict([
            ('root_key_01', 'root_value_01'),
            ('root_02', 'root_value_02'),
            ('root_nested_01', OrderedDict([
                ('nested_key_01', 100),
                ('nested_key_02', 200),
                ('nested_nested_01', OrderedDict([
                    ('nested_nested_key_01', [
                        6161,
                        1051,
                        51919
                    ])
                ]))
            ]))
        ])
        result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0)
        assert_equal(len(result), 10)
        assert_equal(result[2], '*root_nested_01:*')
        assert_equal(Counter(result[4])['\t'], 1)

    def test_json_list_to_text(self):
        """JSON list to text"""
        simple_list = ['test_value_01', 'test_value_02']
        result = self.__dispatcher._json_list_to_text(simple_list, '\t', 0)

        assert_equal(len(result), 2)
        assert_equal(result[0], '*[1]* test_value_01')
        assert_equal(result[1], '*[2]* test_value_02')

    def test_json_map_to_text(self):
        """JSON map to text"""
        simple_dict = OrderedDict([('test_key_01', 'test_value_01'),
                                   ('test_key_02', 'test_value_02')])
        result = self.__dispatcher._json_map_to_text(simple_dict, '\t', 0)

        assert_equal(len(result), 2)
        assert_equal(result[1], '*test_key_02:* test_value_02')

    def _setup_dispatch(self):
        """Helper for setting up SlackOutput dispatch"""
        remove_temp_secrets()

        output_name = self.__dispatcher.output_cred_name(self.__descriptor)

        creds = {'url': 'https://api.slack.com/web-hook-key'}

        put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket,
                       REGION, KMS_ALIAS)

        return get_alert()

    @patch('logging.Logger.info')
    @patch('urllib2.urlopen')
    @mock_s3
    @mock_kms
    def test_dispatch_success(self, url_mock, log_info_mock):
        """SlackOutput dispatch success"""
        alert = self._setup_dispatch()
        url_mock.return_value.getcode.return_value = 200

        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service)

    @patch('logging.Logger.error')
    @patch('urllib2.urlopen')
    @mock_s3
    @mock_kms
    def test_dispatch_failure(self, url_mock, log_error_mock):
        """SlackOutput dispatch failure"""
        alert = self._setup_dispatch()
        error_message = 'a helpful error message'
        url_mock.return_value.read.return_value = error_message
        url_mock.return_value.getcode.return_value = 400

        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        log_error_mock.assert_any_call(
            'Encountered an error while sending to Slack: %s', error_message)
        log_error_mock.assert_any_call('Failed to send alert to %s', self.__service)

    @patch('logging.Logger.error')
    @mock_s3
    @mock_kms
    def test_dispatch_bad_descriptor(self, log_error_mock):
        """SlackOutput dispatch bad descriptor"""
        alert = self._setup_dispatch()
        self.__dispatcher.dispatch(descriptor='bad_descriptor',
                                   rule_name='rule_name',
                                   alert=alert)

        log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)
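

# ---------------------------------------------------------------------------
# Editorial sketch (hedged): this is NOT the SlackOutput implementation, only
# a minimal reimplementation of the flattening behavior the mrkdwn assertions
# above encode -- scalar dict entries render as '*key:* value', container
# values start a bare '*key:*' line, list items render as '*[n]* value', and
# each nesting level adds one leading tab. The helper name is made up.
# ---------------------------------------------------------------------------
def _sketch_json_to_mrkdwn(json_value, indent_count):
    """Flatten a JSON value into tab-indented Slack mrkdwn lines."""
    tab = '\t' * indent_count
    lines = []
    if isinstance(json_value, dict):
        for key, value in json_value.items():
            if isinstance(value, (dict, list)):
                # Container values get a header line, then recurse one level deeper
                lines.append('{}*{}:*'.format(tab, key))
                lines.extend(_sketch_json_to_mrkdwn(value, indent_count + 1))
            else:
                lines.append('{}*{}:* {}'.format(tab, key, value))
    elif isinstance(json_value, list):
        for index, value in enumerate(json_value, start=1):
            lines.append('{}*[{}]* {}'.format(tab, index, value))
    else:
        lines.append('{}{}'.format(tab, json_value))
    return lines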


class TestAWSOutput(object):
    """Test class for AWSOutput Base"""
    @classmethod
    def setup_class(cls):
        """Setup the class before any methods"""
        # pylint: disable=abstract-class-instantiated
        cls.__abstractmethods_cache = outputs.AWSOutput.__abstractmethods__
        outputs.AWSOutput.__abstractmethods__ = frozenset()
        cls.__dispatcher = outputs.AWSOutput(REGION, FUNCTION_NAME, CONFIG)
        cls.__dispatcher.__service__ = 'aws-s3'

    @classmethod
    def teardown_class(cls):
        """Teardown the class after all methods"""
        outputs.AWSOutput.__abstractmethods__ = cls.__abstractmethods_cache
        cls.__dispatcher = None

    def test_aws_format_output_config(self):
        """AWSOutput format output config"""
        props = {
            'descriptor': OutputProperty(
                'short_descriptor',
                'descriptor_value'),
            'aws_value': OutputProperty(
                'unique arn value, bucket, etc',
                'bucket.value')}

        formatted_config = self.__dispatcher.format_output_config(CONFIG, props)

        assert_equal(len(formatted_config), 2)
        assert_is_not_none(formatted_config.get('descriptor_value'))
        assert_is_not_none(formatted_config.get('unit_test_bucket'))

    def test_dispatch(self):
        """AWSOutput dispatch pass"""
        passed = self.__dispatcher.dispatch()
        assert_is_none(passed)


class TestS3Output(object):
    """Test class for S3Output"""
    @classmethod
    def setup_class(cls):
        """Setup the class before any methods"""
        cls.__service = 'aws-s3'
        cls.__descriptor = 'unit_test_bucket'
        cls.__dispatcher = outputs.get_output_dispatcher(
            cls.__service, REGION, FUNCTION_NAME, CONFIG)

    @classmethod
    def teardown_class(cls):
        """Teardown the class after all methods"""
        cls.__dispatcher = None

    def test_locals(self):
        """S3Output local variables"""
        assert_equal(self.__dispatcher.__class__.__name__, 'S3Output')
        assert_equal(self.__dispatcher.__service__, self.__service)

    def _setup_dispatch(self):
        """Helper for setting up S3Output dispatch"""
        bucket = CONFIG[self.__service][self.__descriptor]
        boto3.client('s3', region_name=REGION).create_bucket(Bucket=bucket)

        return get_alert()

    @patch('logging.Logger.info')
    @mock_s3
    def test_dispatch(self, log_mock):
        """S3Output dispatch"""
        alert = self._setup_dispatch()
        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        log_mock.assert_called_with('Successfully sent alert to %s', self.__service)
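

# ---------------------------------------------------------------------------
# Editorial sketch (hedged, not collected by nose) of the moto pattern the S3
# test above relies on: inside @mock_s3, every boto3 S3 call is served from an
# in-memory fake, so the dispatcher's writes land in a throwaway bucket. The
# bucket name and object key below are arbitrary.
# ---------------------------------------------------------------------------
@mock_s3
def _example_moto_s3_roundtrip():
    """Sketch: create a fake bucket, write an object, and read it back."""
    client = boto3.client('s3', region_name=REGION)
    client.create_bucket(Bucket='example-fake-bucket')
    client.put_object(Bucket='example-fake-bucket', Key='alert.json', Body=b'{}')
    body = client.get_object(Bucket='example-fake-bucket',
                             Key='alert.json')['Body'].read()
    assert body == b'{}'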
See the License for the specific language governing permissions", "@mock_s3 @mock_kms def test_dispatch_success(self, url_mock, log_info_mock): \"\"\"SlackOutput dispatch success\"\"\" alert = self._setup_dispatch() url_mock.return_value.getcode.return_value", "Query URL\"\"\" alert = self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) full_url = 'phantom.foo.bar/rest/container?_filter_name=\"rule_name\"&page_size=1' headers =", "url_mock, log_info_mock): \"\"\"PagerDutyOutput dispatch success\"\"\" alert = self._setup_dispatch() url_mock.return_value.getcode.return_value = 200 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name',", "alert)) # tests assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'}) assert_equal( loaded_message['text'], '*StreamAlert Rule Triggered: test_rule_single*')", "url_mock.return_value.getcode.return_value = 400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) self._teardown_dispatch() log_error_mock.assert_called_with('Failed to send alert to %s',", "= ['test_value_01', 'test_value_02'] result = self.__dispatcher._json_to_slack_mrkdwn(simple_list, 0) assert_equal(len(result), 2) assert_equal(result[0], '*[1]* test_value_01') assert_equal(result[1],", "output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds = {'url': url, 'ph_auth_token': '<PASSWORD>'} put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION,", "self.__dispatcher._json_list_to_text(simple_list, '\\t', 0) assert_equal(len(result), 2) assert_equal(result[0], '*[1]* test_value_01') assert_equal(result[1], '*[2]* test_value_02') def test_json_map_to_text(self):", "Apache License, Version 2.0 (the \"License\"); you may not use this file except", "KMS_ALIAS) return get_alert() def _teardown_dispatch(self): \"\"\"Replace method with cached method\"\"\" self.__dispatcher._get_default_properties = self.__backup_method", "'{\"error\": {\"message\": \"failed\", \"errors\": [\"err1\", \"err2\"]}}' url_mock.return_value.read.return_value = bad_message url_mock.return_value.getcode.return_value = 400 self.__dispatcher.dispatch(descriptor=self.__descriptor,", "Cache the _get_default_properties and set it to return None self.__backup_method = self.__dispatcher._get_default_properties self.__dispatcher._get_default_properties", "test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"SlackOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed", "None self.__backup_method = self.__dispatcher._get_default_properties self.__dispatcher._get_default_properties = lambda: None output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds =", "from nose.tools import ( assert_equal, assert_is_none, assert_is_not_none, assert_set_equal ) from stream_alert.alert_processor import outputs", "- nested dict\"\"\" nested_dict = OrderedDict([ ('root_key_01', 'root_value_01'), ('root_02', 'root_value_02'), ('root_nested_01', OrderedDict([ ('nested_key_01',", "( assert_equal, assert_is_none, assert_is_not_none, assert_set_equal ) from stream_alert.alert_processor import outputs from stream_alert.alert_processor.output_base import", "alert = self._setup_dispatch() bad_message = '{\"error\": {\"message\": \"failed\", \"errors\": [\"err1\", \"err2\"]}}' 


def test_existing_get_output_dispatcher():
    """Get output dispatcher - existing"""
    service = 'aws-s3'
    dispatcher = outputs.get_output_dispatcher(
        service, REGION, FUNCTION_NAME, CONFIG)
    assert_is_not_none(dispatcher)


def test_nonexistent_get_output_dispatcher():
    """Get output dispatcher - nonexistent"""
    nonexistent_service = 'aws-s4'
    dispatcher = outputs.get_output_dispatcher(
        nonexistent_service, REGION, FUNCTION_NAME, CONFIG)
    assert_is_none(dispatcher)


@patch('logging.Logger.error')
def test_get_output_dispatcher_logging(log_mock):
    """Get output dispatcher - log error"""
    bad_service = 'bad-output'
    outputs.get_output_dispatcher(bad_service, REGION, FUNCTION_NAME, CONFIG)
    log_mock.assert_called_with(
        'designated output service [%s] does not exist', bad_service)


def test_user_defined_properties():
    """Get user defined properties"""
    for output in outputs.STREAM_OUTPUTS.values():
        props = output(REGION, FUNCTION_NAME, CONFIG).get_user_defined_properties()
        # The user defined properties should at a minimum contain a descriptor
        assert_is_not_none(props.get('descriptor'))


class TestPagerDutyOutput(object):
    """Test class for PagerDutyOutput"""
    @classmethod
    def setup_class(cls):
        """Setup the class before any methods"""
        cls.__service = 'pagerduty'
        cls.__descriptor = 'unit_test_pagerduty'
        cls.__backup_method = None
        cls.__dispatcher = outputs.get_output_dispatcher(
            cls.__service, REGION, FUNCTION_NAME, CONFIG)

    @classmethod
    def teardown_class(cls):
        """Teardown the class after all methods"""
        cls.__dispatcher = None

    def test_get_default_properties(self):
        """Get Default Properties - PagerDuty"""
        props = self.__dispatcher._get_default_properties()
        assert_equal(len(props), 1)
        assert_equal(props['url'],
                     'https://events.pagerduty.com/generic/2010-04-15/create_event.json')

    def _setup_dispatch(self):
        """Helper for setting up PagerDutyOutput dispatch"""
        remove_temp_secrets()

        # Cache the _get_default_properties and set it to return None
        self.__backup_method = self.__dispatcher._get_default_properties
        self.__dispatcher._get_default_properties = lambda: None

        output_name = self.__dispatcher.output_cred_name(self.__descriptor)

        creds = {'url': 'http://pagerduty.foo.bar/create_event.json',
                 'service_key': 'mocked_service_key'}

        put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket,
                       REGION, KMS_ALIAS)

        return get_alert()

    def _teardown_dispatch(self):
        """Replace method with cached method"""
        self.__dispatcher._get_default_properties = self.__backup_method

    @patch('logging.Logger.info')
    @patch('urllib2.urlopen')
    @mock_s3
    @mock_kms
    def test_dispatch_success(self, url_mock, log_info_mock):
        """PagerDutyOutput dispatch success"""
        alert = self._setup_dispatch()
        url_mock.return_value.getcode.return_value = 200
        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        self._teardown_dispatch()

        log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service)

    @patch('logging.Logger.error')
    @patch('urllib2.urlopen')
    @mock_s3
    @mock_kms
    def test_dispatch_failure(self, url_mock, log_error_mock):
        """PagerDutyOutput dispatch failure"""
        alert = self._setup_dispatch()
        bad_message = '{"error": {"message": "failed", "errors": ["err1", "err2"]}}'
        url_mock.return_value.read.return_value = bad_message
        url_mock.return_value.getcode.return_value = 400
        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        self._teardown_dispatch()

        log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)

    @patch('logging.Logger.error')
    @mock_s3
    @mock_kms
    def test_dispatch_bad_descriptor(self, log_error_mock):
        """PagerDutyOutput dispatch bad descriptor"""
        alert = self._setup_dispatch()
        self.__dispatcher.dispatch(descriptor='bad_descriptor',
                                   rule_name='rule_name',
                                   alert=alert)

        self._teardown_dispatch()

        log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)


@mock_s3
@mock_kms
class TestPhantomOutput(object):
    """Test class for PhantomOutput"""
    @classmethod
    def setup_class(cls):
        """Setup the class before any methods"""
        cls.__service = 'phantom'
        cls.__descriptor = 'unit_test_phantom'
        cls.__dispatcher = outputs.get_output_dispatcher(
            cls.__service, REGION, FUNCTION_NAME, CONFIG)

    @classmethod
    def teardown_class(cls):
        """Teardown the class after all methods"""
        cls.__dispatcher = None

    def _setup_dispatch(self, url):
        """Helper for setting up PhantomOutput dispatch"""
        remove_temp_secrets()

        output_name = self.__dispatcher.output_cred_name(self.__descriptor)

        creds = {'url': url,
                 'ph_auth_token': 'mocked_auth_token'}

        put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket,
                       REGION, KMS_ALIAS)

        return get_alert()

    @patch('logging.Logger.info')
    @patch('urllib2.urlopen')
    def test_dispatch_existing_container(self, url_mock, log_mock):
        """PhantomOutput dispatch success, existing container"""
        alert = self._setup_dispatch('phantom.foo.bar')
        url_mock.return_value.getcode.return_value = 200
        url_mock.return_value.read.side_effect = ['{"count": 1, "data": [{"id": 1948}]}']
        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        log_mock.assert_called_with('Successfully sent alert to %s', self.__service)

    @patch('logging.Logger.info')
    @patch('urllib2.urlopen')
    def test_dispatch_new_container(self, url_mock, log_mock):
        """PhantomOutput dispatch success, new container"""
        alert = self._setup_dispatch('phantom.foo.bar')
        url_mock.return_value.getcode.return_value = 200
        url_mock.return_value.read.side_effect = ['{"count": 0, "data": []}', '{"id": 1948}']
        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        log_mock.assert_called_with('Successfully sent alert to %s', self.__service)

    @patch('logging.Logger.error')
    @patch('urllib2.urlopen')
    def test_dispatch_container_failure(self, url_mock, log_mock):
        """PhantomOutput dispatch failure (setup container)"""
        alert = self._setup_dispatch('phantom.foo.bar')
        url_mock.return_value.getcode.return_value = 400
        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        log_mock.assert_called_with('Failed to send alert to %s', self.__service)

    @patch('logging.Logger.error')
    @patch('urllib2.urlopen')
    def test_dispatch_container_error(self, url_mock, log_mock):
        """PhantomOutput dispatch decode error (setup container)"""
        alert = self._setup_dispatch('phantom.foo.bar')
        url_mock.return_value.getcode.return_value = 200
        url_mock.return_value.read.return_value = 'this\nis\nnot\njson'
        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        response = str(call('An error occurred while decoding '
                            'Phantom container query response to JSON: %s',
                            ValueError('No JSON object could be decoded',)))

        assert_equal(str(log_mock.call_args_list[0]), response)

    @patch('logging.Logger.error')
    @patch('urllib2.urlopen')
    def test_dispatch_failure(self, url_mock, log_mock):
        """PhantomOutput dispatch failure (artifact)"""
        alert = self._setup_dispatch('phantom.foo.bar')
        url_mock.return_value.read.side_effect = ['', '{"id": 1902}']
        # Use side_effect to change the getcode return value the second time
        # it is called. This allows testing issues down the chain somewhere
        url_mock.return_value.getcode.side_effect = [200, 400]
        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        log_mock.assert_called_with('Failed to send alert to %s', self.__service)

    @patch('logging.Logger.error')
    def test_dispatch_bad_descriptor(self, log_error_mock):
        """PhantomOutput dispatch bad descriptor"""
        alert = self._setup_dispatch('phantom.foo.bar')
        self.__dispatcher.dispatch(descriptor='bad_descriptor',
                                   rule_name='rule_name',
                                   alert=alert)

        log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)

    @patch('stream_alert.alert_processor.output_base.StreamOutputBase._request_helper')
    def test_dispatch_container_query(self, request_mock):
        """PhantomOutput - Container Query URL"""
        alert = self._setup_dispatch('phantom.foo.bar')
        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        full_url = 'phantom.foo.bar/rest/container?_filter_name="rule_name"&page_size=1'
        headers = {'ph-auth-token': 'mocked_auth_token'}
        request_mock.assert_has_calls([call(full_url, None, headers, False)])
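
# Note on the mocking above: `url_mock` is the patched `urllib2.urlopen`
# callable itself, so `url_mock.return_value` is the mocked HTTP response
# object. Queuing JSON payloads on `read.side_effect` feeds one body per
# request made during dispatch, which is how the container query and the
# container creation calls can be driven with different responses.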


class TestSlackOutput(object):
    """Test class for SlackOutput"""
    @classmethod
    def setup_class(cls):
        """Setup the class before any methods"""
        cls.__service = 'slack'
        cls.__descriptor = 'unit_test_channel'
        cls.__dispatcher = outputs.get_output_dispatcher(
            cls.__service, REGION, FUNCTION_NAME, CONFIG)

    @classmethod
    def teardown_class(cls):
        """Teardown the class after all methods"""
        cls.__dispatcher = None

    def test_format_message_single(self):
        """Format Single Message - Slack"""
        rule_name = 'test_rule_single'
        alert = get_random_alert(25, rule_name)
        loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert))

        # tests
        assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'})
        assert_equal(
            loaded_message['text'],
            '*StreamAlert Rule Triggered: test_rule_single*')
        assert_equal(len(loaded_message['attachments']), 1)

    def test_format_message_multiple(self):
        """Format Multi-Message - Slack"""
        rule_name = 'test_rule_multi-part'
        alert = get_random_alert(30, rule_name)
        loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert))

        # tests
        assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'})
        assert_equal(
            loaded_message['text'],
            '*StreamAlert Rule Triggered: test_rule_multi-part*')
        assert_equal(len(loaded_message['attachments']), 2)
        assert_equal(loaded_message['attachments'][1]
                     ['text'].split('\n')[3][1:7], '000028')

    def test_format_message_default_rule_description(self):
        """Format Message Default Rule Description - Slack"""
        rule_name = 'test_empty_rule_description'
        alert = get_random_alert(10, rule_name, True)
        loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert))

        # tests
        default_rule_description = '*Rule Description:*\nNo rule description provided\n'
        assert_equal(
            loaded_message['attachments'][0]['pretext'],
            default_rule_description)

    def test_json_to_slack_mrkdwn_str(self):
        """JSON to Slack mrkdwn - simple str"""
        simple_str = 'value to format'
        result = self.__dispatcher._json_to_slack_mrkdwn(simple_str, 0)

        assert_equal(len(result), 1)
        assert_equal(result[0], simple_str)

    def test_json_to_slack_mrkdwn_dict(self):
        """JSON to Slack mrkdwn - simple dict"""
        simple_dict = OrderedDict([('test_key_01', 'test_value_01'),
                                   ('test_key_02', 'test_value_02')])
        result = self.__dispatcher._json_to_slack_mrkdwn(simple_dict, 0)

        assert_equal(len(result), 2)
        assert_equal(result[1], '*test_key_02:* test_value_02')

    def test_json_to_slack_mrkdwn_nested_dict(self):
        """JSON to Slack mrkdwn - nested dict"""
        nested_dict = OrderedDict([
            ('root_key_01', 'root_value_01'),
            ('root_02', 'root_value_02'),
            ('root_nested_01', OrderedDict([
                ('nested_key_01', 100),
                ('nested_key_02', 200),
                ('nested_nested_01', OrderedDict([
                    ('nested_nested_key_01', 300)
                ]))
            ]))
        ])
        result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0)
        assert_equal(len(result), 7)
        assert_equal(result[2], '*root_nested_01:*')
        assert_equal(Counter(result[4])['\t'], 1)
        assert_equal(Counter(result[6])['\t'], 2)

    def test_json_to_slack_mrkdwn_list(self):
        """JSON to Slack mrkdwn - simple list"""
        simple_list = ['test_value_01', 'test_value_02']
        result = self.__dispatcher._json_to_slack_mrkdwn(simple_list, 0)

        assert_equal(len(result), 2)
        assert_equal(result[0], '*[1]* test_value_01')
        assert_equal(result[1], '*[2]* test_value_02')

    def test_json_to_slack_mrkdwn_multi_nested(self):
        """JSON to Slack mrkdwn - multi type nested"""
        nested_dict = OrderedDict([
            ('root_key_01', 'root_value_01'),
            ('root_02', 'root_value_02'),
            ('root_nested_01', OrderedDict([
                ('nested_key_01', 100),
                ('nested_key_02', 200),
                ('nested_nested_01', OrderedDict([
                    ('nested_nested_key_01', [
                        6161,
                        1051,
                        51919
                    ])
                ]))
            ]))
        ])
        result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0)
        assert_equal(len(result), 10)
        assert_equal(result[2], '*root_nested_01:*')
        assert_equal(Counter(result[4])['\t'], 1)
        assert_equal(result[-1], '\t\t\t*[3]* 51919')

    def test_json_list_to_text(self):
        """JSON list to text"""
        simple_list = ['test_value_01', 'test_value_02']
        result = self.__dispatcher._json_list_to_text(simple_list, '\t', 0)

        assert_equal(len(result), 2)
        assert_equal(result[0], '*[1]* test_value_01')
        assert_equal(result[1], '*[2]* test_value_02')

    def test_json_map_to_text(self):
        """JSON map to text"""
        simple_dict = OrderedDict([('test_key_01', 'test_value_01'),
                                   ('test_key_02', 'test_value_02')])
        result = self.__dispatcher._json_map_to_text(simple_dict, '\t', 0)

        assert_equal(len(result), 2)
        assert_equal(result[1], '*test_key_02:* test_value_02')

    def _setup_dispatch(self):
        """Helper for setting up SlackOutput dispatch"""
        remove_temp_secrets()

        output_name = self.__dispatcher.output_cred_name(self.__descriptor)

        creds = {'url': 'https://api.slack.com/web-hook-key'}

        put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket,
                       REGION, KMS_ALIAS)

        return get_alert()

    @patch('logging.Logger.info')
    @patch('urllib2.urlopen')
    @mock_s3
    @mock_kms
    def test_dispatch_success(self, url_mock, log_info_mock):
        """SlackOutput dispatch success"""
        alert = self._setup_dispatch()
        url_mock.return_value.getcode.return_value = 200
        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service)

    @patch('logging.Logger.error')
    @patch('urllib2.urlopen')
    @mock_s3
    @mock_kms
    def test_dispatch_failure(self, url_mock, log_error_mock):
        """SlackOutput dispatch failure"""
        alert = self._setup_dispatch()
        error_message = 'a helpful error message'
        url_mock.return_value.read.return_value = error_message
        url_mock.return_value.getcode.return_value = 400
        self.__dispatcher.dispatch(descriptor=self.__descriptor,
                                   rule_name='rule_name',
                                   alert=alert)

        log_error_mock.assert_any_call('Encountered an error while sending to Slack: %s',
                                       error_message)
        log_error_mock.assert_any_call('Failed to send alert to %s', self.__service)

    @patch('logging.Logger.error')
    @mock_s3
    @mock_kms
    def test_dispatch_bad_descriptor(self, log_error_mock):
        """SlackOutput dispatch bad descriptor"""
        alert = self._setup_dispatch()
        self.__dispatcher.dispatch(descriptor='bad_descriptor',
                                   rule_name='rule_name',
                                   alert=alert)

        log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)


class TestAWSOutput(object):
    """Test class for AWSOutput Base"""
    @classmethod
    def setup_class(cls):
        """Setup the class before any methods"""
        # pylint: disable=abstract-class-instantiated
        cls.__abstractmethods_cache = outputs.AWSOutput.__abstractmethods__
        outputs.AWSOutput.__abstractmethods__ = frozenset()
        cls.__dispatcher = outputs.AWSOutput(REGION, FUNCTION_NAME, CONFIG)
        cls.__dispatcher.__service__ = 'aws-s3'

    @classmethod
    def teardown_class(cls):
        """Teardown the class after all methods"""
        outputs.AWSOutput.__abstractmethods__ = cls.__abstractmethods_cache
        cls.__dispatcher = None

    def test_aws_format_output_config(self):
        """AWSOutput format output config"""
        props = {
            'descriptor': OutputProperty(
                'short_descriptor',
                'descriptor_value'),
            'aws_value': OutputProperty(
                'unique arn value, bucket, etc',
                'bucket.value')}

        formatted_config = self.__dispatcher.format_output_config(CONFIG, props)

        assert_equal(len(formatted_config), 2)
        assert_is_not_none(formatted_config.get('descriptor_value'))
        assert_is_not_none(formatted_config.get('unit_test_bucket'))

    def test_dispatch(self):
        """AWSOutput dispatch pass"""
        passed = self.__dispatcher.dispatch()
        assert_is_none(passed)
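
# Note: TestAWSOutput above can instantiate the abstract AWSOutput base class
# only because setup_class swaps AWSOutput.__abstractmethods__ for an empty
# frozenset; teardown_class restores the cached original so the class behaves
# as abstract again for anything that runs afterwards.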
'phantom.foo.bar/rest/container?_filter_name=\"rule_name\"&page_size=1' headers", "\"\"\"JSON to Slack mrkdwn - nested dict\"\"\" nested_dict = OrderedDict([ ('root_key_01', 'root_value_01'), ('root_02',", "= self.__dispatcher._json_to_slack_mrkdwn(simple_dict, 0) assert_equal(len(result), 2) assert_equal(result[1], '*test_key_02:* test_value_02') def test_json_to_slack_mrkdwn_nested_dict(self): \"\"\"JSON to Slack", "json import boto3 from mock import call, patch from moto import mock_s3, mock_kms,", "51919') def test_json_list_to_text(self): \"\"\"JSON list to text\"\"\" simple_list = ['test_value_01', 'test_value_02'] result =", "= 200 url_mock.return_value.read.return_value = 'this\\nis\\nnot\\njson' self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) response = str( call('An error", "error_message url_mock.return_value.getcode.return_value = 400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_error_mock.assert_any_call('Encountered an error while sending to", "True) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests default_rule_description = '*Rule Description:*\\nNo rule description", "def test_dispatch_with_qualifier(self, log_mock): \"\"\"LambdaOutput dispatch with qualifier\"\"\" alt_descriptor = '{}_qual'.format(self.__descriptor) alert = self._setup_dispatch(alt_descriptor)", "@classmethod def teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" cls.__dispatcher = None def", "return None self.__backup_method = self.__dispatcher._get_default_properties self.__dispatcher._get_default_properties = lambda: None output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds", "alt_descriptor = '{}_qual'.format(self.__descriptor) alert = self._setup_dispatch(alt_descriptor) self.__dispatcher.dispatch(descriptor=alt_descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent alert to", "any methods\"\"\" cls.__service = 'aws-lambda' cls.__descriptor = 'unit_test_lambda' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME,", "Triggered: test_rule_multi-part*') assert_equal(len(loaded_message['attachments']), 2) assert_equal(loaded_message['attachments'][1] ['text'].split('\\n')[3][1:7], '000028') def test_format_message_default_rule_description(self): \"\"\"Format Message Default Rule", "props = output(REGION, FUNCTION_NAME, CONFIG).get_user_defined_properties() # The user defined properties should at a", "the Apache License, Version 2.0 (the \"License\"); you may not use this file", "400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_error_mock.assert_any_call('Encountered an error while sending to Slack: %s', error_message)", "@patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_success(self, url_mock, log_info_mock): \"\"\"PagerDutyOutput dispatch success\"\"\" alert = self._setup_dispatch()", "self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0) assert_equal(len(result), 7) assert_equal(result[2], '*root_nested_01:*') assert_equal(Counter(result[4])['\\t'], 1) assert_equal(Counter(result[6])['\\t'], 2) def test_json_to_slack_mrkdwn_list(self): \"\"\"JSON", "log_mock): \"\"\"PhantomOutput dispatch success, existing container\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.side_effect", "the specific language governing 
permissions and limitations under the License. \"\"\" # pylint:", "@patch('logging.Logger.info') def test_dispatch(self, log_mock): \"\"\"LambdaOutput dispatch\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully", "def test_dispatch_existing_container(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch success, existing container\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value", "- Slack\"\"\" rule_name = 'test_rule_single' alert = get_random_alert(25, rule_name) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert))", "alert)) # tests assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'}) assert_equal( loaded_message['text'], '*StreamAlert Rule Triggered: test_rule_multi-part*')", "alert=alert) log_mock.assert_called_with('Successfully sent alert to %s', self.__service) class TestLambdaOuput(object): \"\"\"Test class for LambdaOutput\"\"\"", "sent alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_failure(self, url_mock, log_error_mock):", "success, existing container\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 url_mock.return_value.read.side_effect = ['{\"count\": 1,", "self._setup_dispatch() self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent alert to %s', self.__service) class TestLambdaOuput(object): \"\"\"Test", "Rule Triggered: test_rule_multi-part*') assert_equal(len(loaded_message['attachments']), 2) assert_equal(loaded_message['attachments'][1] ['text'].split('\\n')[3][1:7], '000028') def test_format_message_default_rule_description(self): \"\"\"Format Message Default", "outputs.AWSOutput.__abstractmethods__ outputs.AWSOutput.__abstractmethods__ = frozenset() cls.__dispatcher = outputs.AWSOutput(REGION, FUNCTION_NAME, CONFIG) cls.__dispatcher.__service__ = 'aws-s3' @classmethod", "Rule Triggered: test_rule_single*') assert_equal(len(loaded_message['attachments']), 1) def test_format_message_mutliple(self): \"\"\"Format Multi-Message - Slack\"\"\" rule_name =", "@patch('logging.Logger.info') @patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_success(self, url_mock, log_info_mock): \"\"\"SlackOutput dispatch success\"\"\" alert =", "assert_is_not_none(formatted_config.get('unit_test_bucket')) def test_dispatch(self): \"\"\"AWSOutput dispatch pass\"\"\" passed = self.__dispatcher.dispatch() assert_is_none(passed) class TestS3Ouput(object): \"\"\"Test", "\"\"\"Teardown the class after all methods\"\"\" cls.__dispatcher = None def test_format_message_single(self): \"\"\"Format Single", "= self.__dispatcher.output_cred_name(self.__descriptor) creds = {'url': 'https://api.slack.com/web-hook-key'} put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return get_alert()", "\"\"\"JSON to Slack mrkdwn - simple list\"\"\" simple_list = ['test_value_01', 'test_value_02'] result =", "outputs.AWSOutput.__abstractmethods__ = frozenset() cls.__dispatcher = outputs.AWSOutput(REGION, FUNCTION_NAME, CONFIG) cls.__dispatcher.__service__ = 'aws-s3' @classmethod def", "assert_set_equal ) from stream_alert.alert_processor import outputs from stream_alert.alert_processor.output_base import OutputProperty from 
stream_alert_cli.helpers import", "= 200 url_mock.return_value.read.side_effect = ['{\"count\": 0, \"data\": []}', '{\"id\": 1948}'] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert)", "alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)", "methods\"\"\" # pylint: disable=abstract-class-instantiated cls.__abstractmethods_cache = outputs.AWSOutput.__abstractmethods__ outputs.AWSOutput.__abstractmethods__ = frozenset() cls.__dispatcher = outputs.AWSOutput(REGION,", "patch from moto import mock_s3, mock_kms, mock_lambda from nose.tools import ( assert_equal, assert_is_none,", "= self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed to send alert to %s', self.__service) class", "self.__service) @patch('logging.Logger.error') @mock_s3 @mock_kms def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"SlackOutput dispatch bad descriptor\"\"\" alert =", "= {'url': 'http://pagerduty.foo.bar/create_event.json', 'service_key': 'mocked_service_key'} put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS) return get_alert() def", "log_mock.assert_called_with('Failed to send alert to %s', self.__service) @patch('logging.Logger.error') def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"PhantomOutput dispatch", "2) assert_is_not_none(formatted_config.get('descriptor_value')) assert_is_not_none(formatted_config.get('unit_test_bucket')) def test_dispatch(self): \"\"\"AWSOutput dispatch pass\"\"\" passed = self.__dispatcher.dispatch() assert_is_none(passed) class", "assert_equal(len(loaded_message['attachments']), 2) assert_equal(loaded_message['attachments'][1] ['text'].split('\\n')[3][1:7], '000028') def test_format_message_default_rule_description(self): \"\"\"Format Message Default Rule Description -", "= self._setup_dispatch('phantom.foo.bar') self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) full_url = 'phantom.foo.bar/rest/container?_filter_name=\"rule_name\"&page_size=1' headers = {'ph-auth-token': 'mocked_auth_token'} request_mock.assert_has_calls([call(full_url,", "1) def test_format_message_mutliple(self): \"\"\"Format Multi-Message - Slack\"\"\" rule_name = 'test_rule_multi-part' alert = get_random_alert(30,", "headers, False)]) class TestSlackOutput(object): \"\"\"Test class for PagerDutyOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the", "= self.__dispatcher._json_map_to_text(simple_dict, '\\t', 0) assert_equal(len(result), 2) assert_equal(result[1], '*test_key_02:* test_value_02') def _setup_dispatch(self): \"\"\"Helper for", "= 'unit_test_bucket' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown the", "test_value_01') assert_equal(result[1], '*[2]* test_value_02') def test_json_to_slack_mrkdwn_multi_nested(self): \"\"\"JSON to Slack mrkdwn - multi type", "@mock_s3 @mock_kms def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"PagerDutyOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor',", "_get_default_properties and set it to return None self.__backup_method = self.__dispatcher._get_default_properties 
self.__dispatcher._get_default_properties = lambda:", "test_dispatch_with_qualifier(self, log_mock): \"\"\"LambdaOutput dispatch with qualifier\"\"\" alt_descriptor = '{}_qual'.format(self.__descriptor) alert = self._setup_dispatch(alt_descriptor) self.__dispatcher.dispatch(descriptor=alt_descriptor,", "100), ('nested_key_02', 200), ('nested_nested_01', OrderedDict([ ('nested_nested_key_01', 300) ])) ])) ]) result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict,", "2) assert_equal(loaded_message['attachments'][1] ['text'].split('\\n')[3][1:7], '000028') def test_format_message_default_rule_description(self): \"\"\"Format Message Default Rule Description - Slack\"\"\"", "- multi type nested\"\"\" nested_dict = OrderedDict([ ('root_key_01', 'root_value_01'), ('root_02', 'root_value_02'), ('root_nested_01', OrderedDict([", "assert_equal(len(result), 2) assert_equal(result[1], '*test_key_02:* test_value_02') def test_json_to_slack_mrkdwn_nested_dict(self): \"\"\"JSON to Slack mrkdwn - nested", "qualifier\"\"\" alt_descriptor = '{}_qual'.format(self.__descriptor) alert = self._setup_dispatch(alt_descriptor) self.__dispatcher.dispatch(descriptor=alt_descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent alert", "simple_dict = OrderedDict([('test_key_01', 'test_value_01'), ('test_key_02', 'test_value_02')]) result = self.__dispatcher._json_to_slack_mrkdwn(simple_dict, 0) assert_equal(len(result), 2) assert_equal(result[1],", "'this\\nis\\nnot\\njson' self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) response = str( call('An error occurred while decoding '", "\"\"\"LambdaOutput dispatch\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent alert to %s',", "before any methods\"\"\" # pylint: disable=abstract-class-instantiated cls.__abstractmethods_cache = outputs.AWSOutput.__abstractmethods__ outputs.AWSOutput.__abstractmethods__ = frozenset() cls.__dispatcher", "result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0) assert_equal(len(result), 7) assert_equal(result[2], '*root_nested_01:*') assert_equal(Counter(result[4])['\\t'], 1) assert_equal(Counter(result[6])['\\t'], 2) def", "# Cache the _get_default_properties and set it to return None self.__backup_method = self.__dispatcher._get_default_properties", "bad_service = 'bad-output' outputs.get_output_dispatcher(bad_service, REGION, FUNCTION_NAME, CONFIG) log_mock.assert_called_with( 'designated output service [%s] does", "the class after all methods\"\"\" cls.__dispatcher = None def _setup_dispatch(self, url): \"\"\"Helper for", "PhantomOutput dispatch\"\"\" remove_temp_secrets() output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds = {'url': url, 'ph_auth_token': '<PASSWORD>'} put_mock_creds(output_name,", "FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" cls.__dispatcher =", "= 'slack' cls.__descriptor = 'unit_test_channel' cls.__dispatcher = outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def", "2) assert_equal(result[1], '*test_key_02:* test_value_02') def test_json_to_slack_mrkdwn_nested_dict(self): \"\"\"JSON to Slack mrkdwn - nested dict\"\"\"", "dispatch success, new container\"\"\" alert = self._setup_dispatch('phantom.foo.bar') url_mock.return_value.getcode.return_value = 200 
url_mock.return_value.read.side_effect = ['{\"count\":", "container query response to JSON: %s', ValueError( 'No JSON object could be decoded',)))", "400 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) self._teardown_dispatch() log_error_mock.assert_called_with('Failed to send alert to %s', self.__service) @patch('logging.Logger.error')", "Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not", "dispatch success\"\"\" alert = self._setup_dispatch() url_mock.return_value.getcode.return_value = 200 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_info_mock.assert_called_with('Successfully sent", "test_get_output_dispatcher_logging(log_mock): \"\"\"Get output dispatcher - log error\"\"\" bad_service = 'bad-output' outputs.get_output_dispatcher(bad_service, REGION, FUNCTION_NAME,", "props = { 'descriptor': OutputProperty( 'short_descriptor', 'descriptor_value'), 'aws_value': OutputProperty( 'unique arn value, bucket,", "PhantomOutput\"\"\" @classmethod def setup_class(cls): \"\"\"Setup the class before any methods\"\"\" cls.__service = 'phantom'", "compliance with the License. You may obtain a copy of the License at", "log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_failure(self, url_mock,", "Default Properties - PagerDuty\"\"\" props = self.__dispatcher._get_default_properties() assert_equal(len(props), 1) assert_equal(props['url'], 'https://events.pagerduty.com/generic/2010-04-15/create_event.json') def _setup_dispatch(self):", "self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) full_url = 'phantom.foo.bar/rest/container?_filter_name=\"rule_name\"&page_size=1' headers = {'ph-auth-token': 'mocked_auth_token'} request_mock.assert_has_calls([call(full_url, None, headers,", "def teardown_class(cls): \"\"\"Teardown the class after all methods\"\"\" outputs.AWSOutput.__abstractmethods__ = cls.__abstractmethods_cache cls.__dispatcher =", "for output in outputs.STREAM_OUTPUTS.values(): props = output(REGION, FUNCTION_NAME, CONFIG).get_user_defined_properties() # The user defined", "to return None self.__backup_method = self.__dispatcher._get_default_properties self.__dispatcher._get_default_properties = lambda: None output_name = self.__dispatcher.output_cred_name(self.__descriptor)", "pylint: disable=protected-access from collections import Counter, OrderedDict import json import boto3 from mock", "url_mock.return_value.getcode.return_value = 200 self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) self._teardown_dispatch() log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service)", "rule_name) loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert)) # tests assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'}) assert_equal( loaded_message['text'],", "test_user_defined_properties(): \"\"\"Get user defined properties\"\"\" for output in outputs.STREAM_OUTPUTS.values(): props = output(REGION, FUNCTION_NAME,", "= outputs.get_output_dispatcher(cls.__service, REGION, FUNCTION_NAME, CONFIG) @classmethod def teardown_class(cls): \"\"\"Teardown the class after all", "for setting up S3Output dispatch\"\"\" bucket = CONFIG[self.__service][self.__descriptor] boto3.client('s3', 
region_name=REGION).create_bucket(Bucket=bucket) return get_alert() @patch('logging.Logger.info')", "alert to %s', self.__service) @patch('logging.Logger.info') @patch('urllib2.urlopen') def test_dispatch_new_container(self, url_mock, log_mock): \"\"\"PhantomOutput dispatch success,", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by", "assert_equal(result[0], simple_str) def test_json_to_slack_mrkdwn_dict(self): \"\"\"JSON to Slack mrkdwn - simple dict\"\"\" simple_dict =", "400] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Failed to send alert to %s', self.__service) @patch('logging.Logger.error') def", "alert=alert) log_error_mock.assert_called_with('Failed to send alert to %s', self.__service) class TestAWSOutput(object): \"\"\"Test class for", "def test_dispatch_bad_descriptor(self, log_error_mock): \"\"\"SlackOutput dispatch bad descriptor\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert)", "applicable law or agreed to in writing, software distributed under the License is", "object could be decoded',))) assert_equal(str(log_mock.call_args_list[0]), response) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_failure(self, url_mock, log_mock): \"\"\"PhantomOutput", "def test_format_message_default_rule_description(self): \"\"\"Format Message Default Rule Description - Slack\"\"\" rule_name = 'test_empty_rule_description' alert", "@patch('logging.Logger.info') @patch('urllib2.urlopen') @mock_s3 @mock_kms def test_dispatch_success(self, url_mock, log_info_mock): \"\"\"PagerDutyOutput dispatch success\"\"\" alert =", "'bad-output' outputs.get_output_dispatcher(bad_service, REGION, FUNCTION_NAME, CONFIG) log_mock.assert_called_with( 'designated output service [%s] does not exist',", "descriptor\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed to send alert to %s',", "bad descriptor\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor='bad_descriptor', rule_name='rule_name', alert=alert) log_error_mock.assert_called_with('Failed to send alert to", "return get_alert() @patch('logging.Logger.info') @mock_s3 def test_dispatch(self, log_mock): \"\"\"S3Output dispatch\"\"\" alert = self._setup_dispatch() self.__dispatcher.dispatch(descriptor=self.__descriptor,", "assert_equal(loaded_message['attachments'][1] ['text'].split('\\n')[3][1:7], '000028') def test_format_message_default_rule_description(self): \"\"\"Format Message Default Rule Description - Slack\"\"\" rule_name", "class after all methods\"\"\" cls.dispatcher = None def test_locals(self): \"\"\"S3Output local variables\"\"\" assert_equal(self.__dispatcher.__class__.__name__,", "['text'].split('\\n')[3][1:7], '000028') def test_format_message_default_rule_description(self): \"\"\"Format Message Default Rule Description - Slack\"\"\" rule_name =", "alert=alert) log_mock.assert_called_with('Successfully sent alert to %s', self.__service) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_container_failure(self, url_mock, log_mock):", "log_error_mock.assert_called_with('Failed to send alert to %s', self.__service) @mock_s3 @mock_kms class TestPhantomOutput(object): \"\"\"Test class", "'descriptor_value'), 'aws_value': OutputProperty( 'unique arn 
value, bucket, etc', 'bucket.value')} formatted_config = self.__dispatcher.format_output_config(CONFIG, props)", "nested dict\"\"\" nested_dict = OrderedDict([ ('root_key_01', 'root_value_01'), ('root_02', 'root_value_02'), ('root_nested_01', OrderedDict([ ('nested_key_01', 100),", "'*root_nested_01:*') assert_equal(Counter(result[4])['\\t'], 1) assert_equal(result[-1], '\\t\\t\\t*[3]* 51919') def test_json_list_to_text(self): \"\"\"JSON list to text\"\"\" simple_list", "somewhere url_mock.return_value.getcode.side_effect = [200, 400] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Failed to send alert to", "alert to %s', self.__service) @mock_lambda @patch('logging.Logger.info') def test_dispatch_with_qualifier(self, log_mock): \"\"\"LambdaOutput dispatch with qualifier\"\"\"", "pylint: disable=abstract-class-instantiated cls.__abstractmethods_cache = outputs.AWSOutput.__abstractmethods__ outputs.AWSOutput.__abstractmethods__ = frozenset() cls.__dispatcher = outputs.AWSOutput(REGION, FUNCTION_NAME, CONFIG)", "= ['{\"count\": 1, \"data\": [{\"id\": 1948}]}'] self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent alert to", "self.__dispatcher.dispatch(descriptor=self.__descriptor, rule_name='rule_name', alert=alert) log_mock.assert_called_with('Successfully sent alert to %s', self.__service) class TestLambdaOuput(object): \"\"\"Test class", "JSON object could be decoded',))) assert_equal(str(log_mock.call_args_list[0]), response) @patch('logging.Logger.error') @patch('urllib2.urlopen') def test_dispatch_failure(self, url_mock, log_mock):", "('nested_nested_key_01', 300) ])) ])) ]) result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0) assert_equal(len(result), 7) assert_equal(result[2], '*root_nested_01:*')", "Slack mrkdwn - simple dict\"\"\" simple_dict = OrderedDict([('test_key_01', 'test_value_01'), ('test_key_02', 'test_value_02')]) result =", "with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", "= self.__dispatcher._get_default_properties self.__dispatcher._get_default_properties = lambda: None output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds = {'url': 'http://pagerduty.foo.bar/create_event.json',", "url): \"\"\"Helper for setting up PhantomOutput dispatch\"\"\" remove_temp_secrets() output_name = self.__dispatcher.output_cred_name(self.__descriptor) creds =" ]
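# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original suite: a distilled version of
# the urlopen-mocking pattern the dispatch tests above rely on. Everything
# here is hypothetical (`FakeHTTPOutput`, the example URL); it only shows how
# patching urllib2.urlopen lets a test drive the HTTP response code and then
# assert on the logged outcome without any real network traffic.
import logging
import urllib2

from mock import patch


class FakeHTTPOutput(object):
    """Hypothetical HTTP-backed output, used only to demonstrate the pattern"""

    def dispatch(self, url):
        """Dispatch reduced to a single request for illustration"""
        response = urllib2.urlopen(url)
        if response.getcode() == 200:
            logging.getLogger(__name__).info('Successfully sent alert to %s', url)
        else:
            logging.getLogger(__name__).error('Failed to send alert to %s', url)


@patch('logging.Logger.info')
@patch('urllib2.urlopen')
def test_fake_dispatch_success(url_mock, log_mock):
    """FakeHTTPOutput dispatch success (illustrative only)"""
    # The innermost patch (urllib2.urlopen) supplies the first mock argument
    url_mock.return_value.getcode.return_value = 200
    FakeHTTPOutput().dispatch('https://example.com/web-hook')
    log_mock.assert_called_with('Successfully sent alert to %s',
                                'https://example.com/web-hook')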
[ "def sigmoid(x): return 1/(1+np.exp(-x)) def softmax(x): return np.exp(x)/np.sum(np.exp(x)) Wi,Ri,bi = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wo,Ro,bo =", "{} z_g = {} os = {} cs[-1] = np.copy(c_p) ys[-1] = np.copy(y_p)", "dWf += np.outer(dfg_,x) dRf += np.outer(dfg_,ys[t-1]) dy_f = np.dot(Rf.T,dfg_) dbf += dfg_ dog_", "= f_g[t] * (1-f_g[t])*dfg dWf += np.outer(dfg_,x) dRf += np.outer(dfg_,ys[t-1]) dy_f = np.dot(Rf.T,dfg_)", "plt import matplotlib.animation as animation plt.ion() dataset = open('../data/input.txt','r').read() #dataset = open('../data/code.txt','r').read() len_of_dataset", "np.dot(Wy,ys[t])+by p[t] = softmax(os[t]) loss += -np.log(p[t][o[t],0]) dWi,dRi,dbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) dWo,dRo,dbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo)", "numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation plt.ion() dataset", "= np.dot(Wy,ys[t])+by p[t] = softmax(os[t]) loss += -np.log(p[t][o[t],0]) dWi,dRi,dbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) dWo,dRo,dbo =", "np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) dWf,dRf,dbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) dWz,dRz,dbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) dWy,dby = np.zeros_like(Wy),np.zeros_like(by) dy_z,dy_f,dy_o,dy_i = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1))", "{char:idx for idx,char in enumerate(vocab)} idx_to_char = {idx:char for idx,char in enumerate(vocab)} print('char_to_idx:',char_to_idx)", "dWz,dRz,dbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) dWy,dby = np.zeros_like(Wy),np.zeros_like(by) dy_z,dy_f,dy_o,dy_i = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) dcs_c = np.zeros((len_of_vocab,1)) for", "np.dot(Rf.T,dfg_) dbf += dfg_ dog_ = o_g[t]*(1-o_g[t])*dog dWo += np.outer(dog_,x) dRo += np.outer(dog_,ys[t-1])", "dRi += np.outer(dig_,ys[t-1]) dy_i = np.dot(Ri.T,dig_) dbi += dig_ for param in [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz]:", "= {} f_g = {} z_g = {} os = {} cs[-1] =", "+ np.dot(Rf,ys[t-1])+bf f_g[t] = sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,ys[t-1])+bz z_g[t] = np.tanh(Z)", "= np.dot(Wz,x) + np.dot(Rz,y_p)+bz z_g = np.tanh(Z) c_p = i_g*z_g + f_g *c_p", "dRz += np.outer(dzg_,ys[t-1]) dbz += dzg_ dy_z = np.dot(Rz.T,dzg_) dfg_ = f_g[t] *", "np.dot(Ro.T,dog_) dbo += dog_ dig_ = i_g[t]*(1-i_g[t])*dig dWi += np.outer(dig_,x) dRi += np.outer(dig_,ys[t-1])", "idx.append(id) x = np.zeros((len_of_vocab,1)) x[id,0]=1 print(''.join([idx_to_char[c] for c in idx])) def forward_backward_pass(i,o,y_p,c_p): cs", "np.outer(dzg_,x) dRz += np.outer(dzg_,ys[t-1]) dbz += dzg_ dy_z = np.dot(Rz.T,dzg_) dfg_ = f_g[t]", "= {idx:char for idx,char in enumerate(vocab)} print('char_to_idx:',char_to_idx) print('idx_to_char:',idx_to_char) start_ptr = 0 lr =", "+= np.outer(dog_,ys[t-1]) dy_o = np.dot(Ro.T,dog_) dbo += dog_ dig_ = i_g[t]*(1-i_g[t])*dig dWi +=", "len(vocab) print('len of vocab:',len_of_vocab) char_to_idx = {char:idx for idx,char in enumerate(vocab)} idx_to_char =", "o_g = {} f_g = {} z_g = {} os = {} cs[-1]", "*cs[t-1] ys[t] = o_g[t] * np.tanh(cs[t]) os[t] = np.dot(Wy,ys[t])+by p[t] = softmax(os[t]) loss", "np.dot(Rz,ys[t-1])+bz z_g[t] = np.tanh(Z) cs[t] = i_g[t]*z_g[t] + f_g[t] *cs[t-1] ys[t] = o_g[t]", "+= np.outer(dfg_,ys[t-1]) dy_f = 
np.dot(Rf.T,dfg_) dbf += dfg_ dog_ = o_g[t]*(1-o_g[t])*dog dWo +=", "cs[t] = i_g[t]*z_g[t] + f_g[t] *cs[t-1] ys[t] = o_g[t] * np.tanh(cs[t]) os[t] =", "Wo,Ro,bo = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wf,Rf,bf = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wz,Rz,bz = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wy,by = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.zeros((len_of_vocab,1)) mWi,mRi,mbi", "for idx,char in enumerate(vocab)} print('char_to_idx:',char_to_idx) print('idx_to_char:',idx_to_char) start_ptr = 0 lr = 1e-1 time_step", "= np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) dWf,dRf,dbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) dWz,dRz,dbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) dWy,dby = np.zeros_like(Wy),np.zeros_like(by) dy_z,dy_f,dy_o,dy_i =", "dog_ = o_g[t]*(1-o_g[t])*dog dWo += np.outer(dog_,x) dRo += np.outer(dog_,ys[t-1]) dy_o = np.dot(Ro.T,dog_) dbo", "1 do = np.copy(p[t]) do[o[t],0] -= 1 dWy += np.outer(do,ys[t]) dby += do", "dzg_ = (1-z_g[t]*z_g[t])*dzg dWz += np.outer(dzg_,x) dRz += np.outer(dzg_,ys[t-1]) dbz += dzg_ dy_z", "if start_ptr+time_step>len_of_dataset: start_ptr = 0 y_prev = np.zeros((len_of_vocab,1)) else: input = [char_to_idx[c] for", "np.dot(Wy,y_p)+by p = softmax(os) id = np.random.choice(len_of_vocab,1,p=p.ravel())[0] idx.append(id) x = np.zeros((len_of_vocab,1)) x[id,0]=1 print(''.join([idx_to_char[c]", "= np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) dWy,dby = np.zeros_like(Wy),np.zeros_like(by) dy_z,dy_f,dy_o,dy_i = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) dcs_c = np.zeros((len_of_vocab,1)) for t", "start_ptr = 0 y_prev = np.zeros((len_of_vocab,1)) else: input = [char_to_idx[c] for c in", "+= dfg_ dog_ = o_g[t]*(1-o_g[t])*dog dWo += np.outer(dog_,x) dRo += np.outer(dog_,ys[t-1]) dy_o =", "import matplotlib.animation as animation plt.ion() dataset = open('../data/input.txt','r').read() #dataset = open('../data/code.txt','r').read() len_of_dataset =", "sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,y_p)+bo o_g = sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,y_p)+bf f_g", "sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,y_p)+bz z_g = np.tanh(Z) c_p = i_g*z_g +", "dzg = i_g[t]*dcs dfg = cs[t-1]*dcs dzg_ = (1-z_g[t]*z_g[t])*dzg dWz += np.outer(dzg_,x) dRz", "[] x = np.zeros((len_of_vocab,1)) x[10,0] = np.random.randint(0,len_of_vocab) for t in range(200): I =", "np.tanh(Z) cs[t] = i_g[t]*z_g[t] + f_g[t] *cs[t-1] ys[t] = o_g[t] * np.tanh(cs[t]) os[t]", "#dataset = open('../data/code.txt','r').read() len_of_dataset = len(dataset) print('len of dataset:',len_of_dataset) vocab = set(dataset) len_of_vocab", "c in dataset[start_ptr+1:start_ptr+time_step+1]] loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,y_prev,c_prev=forward_backward_pass(i=input,o=output,y_p=y_prev,c_p=c_prev) for params,dparams,mparams in zip([Wi,Ri,bi,Wo,Ro,bo,Wf,Rf,bf,Wz,Rz,bz,Wy,by],\\ [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby],[mWi,mRi,mbi,mWo,mRo,mbo,mWf,mRf,mbf,mWz,mRz,mbz,mWy,mby]): mparams += dparams*dparams params", "= dy + dy_z + dy_f + dy_i + dy_o dcs = o_g[t]", "(1-z_g[t]*z_g[t])*dzg dWz += np.outer(dzg_,x) dRz += 
np.outer(dzg_,ys[t-1]) dbz += dzg_ dy_z = np.dot(Rz.T,dzg_)", "= np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) mWz,mRz,mbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) mWy,mby = np.zeros_like(Wy),np.zeros_like(by) def sample(y_p,c_p): idx = []", "mWf,mRf,mbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) mWz,mRz,mbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) mWy,mby = np.zeros_like(Wy),np.zeros_like(by) def sample(y_p,c_p): idx =", "= o_g * np.tanh(c_p) os = np.dot(Wy,y_p)+by p = softmax(os) id = np.random.choice(len_of_vocab,1,p=p.ravel())[0]", "np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wz,Rz,bz = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wy,by = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.zeros((len_of_vocab,1)) mWi,mRi,mbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) mWo,mRo,mbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo)", "= [] x = np.zeros((len_of_vocab,1)) x[10,0] = np.random.randint(0,len_of_vocab) for t in range(200): I", "vocab:',len_of_vocab) char_to_idx = {char:idx for idx,char in enumerate(vocab)} idx_to_char = {idx:char for idx,char", "+= -np.log(p[t][o[t],0]) dWi,dRi,dbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) dWo,dRo,dbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) dWf,dRf,dbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) dWz,dRz,dbz =", "= sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,ys[t-1])+bo o_g[t] = sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,ys[t-1])+bf", "np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) n = 0 x=[] y=[] smooth_loss = -np.log(1/len_of_vocab)*time_step while n<=epoches: if start_ptr+time_step>len_of_dataset:", "z_g = {} os = {} cs[-1] = np.copy(c_p) ys[-1] = np.copy(y_p) p", "np.zeros_like(Wy),np.zeros_like(by) def sample(y_p,c_p): idx = [] x = np.zeros((len_of_vocab,1)) x[10,0] = np.random.randint(0,len_of_vocab) for", "= 1 do = np.copy(p[t]) do[o[t],0] -= 1 dWy += np.outer(do,ys[t]) dby +=", "for params,dparams,mparams in zip([Wi,Ri,bi,Wo,Ro,bo,Wf,Rf,bf,Wz,Rz,bz,Wy,by],\\ [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby],[mWi,mRi,mbi,mWo,mRo,mbo,mWf,mRf,mbf,mWz,mRz,mbz,mWy,mby]): mparams += dparams*dparams params += -lr*dparams/np.sqrt(mparams+1e-8) smooth_loss =", "x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1 do = np.copy(p[t]) do[o[t],0] -= 1 dWy", "= np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) mWf,mRf,mbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) mWz,mRz,mbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) mWy,mby = np.zeros_like(Wy),np.zeros_like(by) def sample(y_p,c_p):", "i_g*z_g + f_g *c_p y_p = o_g * np.tanh(c_p) os = np.dot(Wy,y_p)+by p", "print('len of dataset:',len_of_dataset) vocab = set(dataset) len_of_vocab = len(vocab) print('len of vocab:',len_of_vocab) char_to_idx", "return loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,ys[time_step-1],cs[time_step-1] y_prev,c_prev = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) n = 0 x=[] y=[] smooth_loss = -np.log(1/len_of_vocab)*time_step", "{} ys = {} i_g = {} o_g = {} f_g = {}", "I = np.dot(Wi,x)+np.dot(Ri,y_p)+bi i_g = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,y_p)+bo o_g = sigmoid(O) F", "in [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz]: np.clip(param,-1,1,out=param) return 
loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,ys[time_step-1],cs[time_step-1] y_prev,c_prev = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) n = 0 x=[] y=[]", "= np.tanh(Z) cs[t] = i_g[t]*z_g[t] + f_g[t] *cs[t-1] ys[t] = o_g[t] * np.tanh(cs[t])", "i_g[t]*z_g[t] + f_g[t] *cs[t-1] ys[t] = o_g[t] * np.tanh(cs[t]) os[t] = np.dot(Wy,ys[t])+by p[t]", "ys[t] = o_g[t] * np.tanh(cs[t]) os[t] = np.dot(Wy,ys[t])+by p[t] = softmax(os[t]) loss +=", "1 dWy += np.outer(do,ys[t]) dby += do dy = np.dot(Wy,do) dy = dy", "matplotlib.animation as animation plt.ion() dataset = open('../data/input.txt','r').read() #dataset = open('../data/code.txt','r').read() len_of_dataset = len(dataset)", "= np.random.choice(len_of_vocab,1,p=p.ravel())[0] idx.append(id) x = np.zeros((len_of_vocab,1)) x[id,0]=1 print(''.join([idx_to_char[c] for c in idx])) def", "dig_ = i_g[t]*(1-i_g[t])*dig dWi += np.outer(dig_,x) dRi += np.outer(dig_,ys[t-1]) dy_i = np.dot(Ri.T,dig_) dbi", "np import matplotlib.pyplot as plt import matplotlib.animation as animation plt.ion() dataset = open('../data/input.txt','r').read()", "= np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wz,Rz,bz = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wy,by = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.zeros((len_of_vocab,1)) mWi,mRi,mbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) mWo,mRo,mbo =", "cs[t-1]*dcs dzg_ = (1-z_g[t]*z_g[t])*dzg dWz += np.outer(dzg_,x) dRz += np.outer(dzg_,ys[t-1]) dbz += dzg_", "Wi,Ri,bi = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wo,Ro,bo = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wf,Rf,bf = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wz,Rz,bz = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wy,by", "id = np.random.choice(len_of_vocab,1,p=p.ravel())[0] idx.append(id) x = np.zeros((len_of_vocab,1)) x[id,0]=1 print(''.join([idx_to_char[c] for c in idx]))", "= o_g[t]*(1-o_g[t])*dog dWo += np.outer(dog_,x) dRo += np.outer(dog_,ys[t-1]) dy_o = np.dot(Ro.T,dog_) dbo +=", "np.clip(param,-1,1,out=param) return loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,ys[time_step-1],cs[time_step-1] y_prev,c_prev = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) n = 0 x=[] y=[] smooth_loss =", "np.dot(Wy,do) dy = dy + dy_z + dy_f + dy_i + dy_o dcs", "as plt import matplotlib.animation as animation plt.ion() dataset = open('../data/input.txt','r').read() #dataset = open('../data/code.txt','r').read()", "= {char:idx for idx,char in enumerate(vocab)} idx_to_char = {idx:char for idx,char in enumerate(vocab)}", "+ f_g *c_p y_p = o_g * np.tanh(c_p) os = np.dot(Wy,y_p)+by p =", "+= do dy = np.dot(Wy,do) dy = dy + dy_z + dy_f +", "dy_o = np.dot(Ro.T,dog_) dbo += dog_ dig_ = i_g[t]*(1-i_g[t])*dig dWi += np.outer(dig_,x) dRi", "return 1/(1+np.exp(-x)) def softmax(x): return np.exp(x)/np.sum(np.exp(x)) Wi,Ri,bi = 
np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wo,Ro,bo = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wf,Rf,bf", "dbi += dig_ for param in [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz]: np.clip(param,-1,1,out=param) return loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,ys[time_step-1],cs[time_step-1] y_prev,c_prev = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1))", "dfg_ = f_g[t] * (1-f_g[t])*dfg dWf += np.outer(dfg_,x) dRf += np.outer(dfg_,ys[t-1]) dy_f =", "o_g = sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,y_p)+bf f_g = sigmoid(F) Z =", "dcs_c = np.zeros((len_of_vocab,1)) for t in reversed(range(time_step)): x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1", "dy = np.dot(Wy,do) dy = dy + dy_z + dy_f + dy_i +", "np.dot(Rz.T,dzg_) dfg_ = f_g[t] * (1-f_g[t])*dfg dWf += np.outer(dfg_,x) dRf += np.outer(dfg_,ys[t-1]) dy_f", "dataset:',len_of_dataset) vocab = set(dataset) len_of_vocab = len(vocab) print('len of vocab:',len_of_vocab) char_to_idx = {char:idx", "dWi,dRi,dbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) dWo,dRo,dbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) dWf,dRf,dbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) dWz,dRz,dbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) dWy,dby", "= -np.log(1/len_of_vocab)*time_step while n<=epoches: if start_ptr+time_step>len_of_dataset: start_ptr = 0 y_prev = np.zeros((len_of_vocab,1)) else:", "i_g[t] = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,ys[t-1])+bo o_g[t] = sigmoid(O) F = np.dot(Wf,x) +", "1/(1+np.exp(-x)) def softmax(x): return np.exp(x)/np.sum(np.exp(x)) Wi,Ri,bi = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wo,Ro,bo = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wf,Rf,bf =", "x[i[t],0] = 1 do = np.copy(p[t]) do[o[t],0] -= 1 dWy += np.outer(do,ys[t]) dby", "= np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) mWo,mRo,mbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) mWf,mRf,mbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) mWz,mRz,mbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) mWy,mby =", "char_to_idx = {char:idx for idx,char in enumerate(vocab)} idx_to_char = {idx:char for idx,char in", "np.outer(dfg_,ys[t-1]) dy_f = np.dot(Rf.T,dfg_) dbf += dfg_ dog_ = o_g[t]*(1-o_g[t])*dog dWo += np.outer(dog_,x)", "* (1-np.tanh(cs[t])*np.tanh(cs[t]))*dy + dcs_c dcs_c = f_g[t]*dcs dig = z_g[t]*dcs dog = np.tanh(cs[t])*dy", "= np.dot(Ri.T,dig_) dbi += dig_ for param in [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz]: np.clip(param,-1,1,out=param) return loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,ys[time_step-1],cs[time_step-1] y_prev,c_prev", "F = np.dot(Wf,x) + np.dot(Rf,ys[t-1])+bf f_g[t] = sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,ys[t-1])+bz", "= np.dot(Wz,x) + np.dot(Rz,ys[t-1])+bz z_g[t] = np.tanh(Z) cs[t] = i_g[t]*z_g[t] + f_g[t] *cs[t-1]", "x[id,0]=1 print(''.join([idx_to_char[c] for c in idx])) def forward_backward_pass(i,o,y_p,c_p): cs = {} ys =", "t in range(time_step): x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1 I = np.dot(Wi,x)+np.dot(Ri,ys[t-1])+bi i_g[t]", "for c in 
dataset[start_ptr+1:start_ptr+time_step+1]] loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,y_prev,c_prev=forward_backward_pass(i=input,o=output,y_p=y_prev,c_p=c_prev) for params,dparams,mparams in zip([Wi,Ri,bi,Wo,Ro,bo,Wf,Rf,bf,Wz,Rz,bz,Wy,by],\\ [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby],[mWi,mRi,mbi,mWo,mRo,mbo,mWf,mRf,mbf,mWz,mRz,mbz,mWy,mby]): mparams += dparams*dparams", "in range(200): I = np.dot(Wi,x)+np.dot(Ri,y_p)+bi i_g = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,y_p)+bo o_g =", "[dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby],[mWi,mRi,mbi,mWo,mRo,mbo,mWf,mRf,mbf,mWz,mRz,mbz,mWy,mby]): mparams += dparams*dparams params += -lr*dparams/np.sqrt(mparams+1e-8) smooth_loss = (0.999*smooth_loss)+(0.001*loss) x.append(n) y.append(smooth_loss) if", "f_g[t] * (1-f_g[t])*dfg dWf += np.outer(dfg_,x) dRf += np.outer(dfg_,ys[t-1]) dy_f = np.dot(Rf.T,dfg_) dbf", "= 0 x=[] y=[] smooth_loss = -np.log(1/len_of_vocab)*time_step while n<=epoches: if start_ptr+time_step>len_of_dataset: start_ptr =", "dWo += np.outer(dog_,x) dRo += np.outer(dog_,ys[t-1]) dy_o = np.dot(Ro.T,dog_) dbo += dog_ dig_", "np.dot(Wf,x) + np.dot(Rf,ys[t-1])+bf f_g[t] = sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,ys[t-1])+bz z_g[t] =", "+ dy_o dcs = o_g[t] * (1-np.tanh(cs[t])*np.tanh(cs[t]))*dy + dcs_c dcs_c = f_g[t]*dcs dig", "= np.zeros_like(Wy),np.zeros_like(by) def sample(y_p,c_p): idx = [] x = np.zeros((len_of_vocab,1)) x[10,0] = np.random.randint(0,len_of_vocab)", "np.zeros((len_of_vocab,1)) x[10,0] = np.random.randint(0,len_of_vocab) for t in range(200): I = np.dot(Wi,x)+np.dot(Ri,y_p)+bi i_g =", "p[t] = softmax(os[t]) loss += -np.log(p[t][o[t],0]) dWi,dRi,dbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) dWo,dRo,dbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) dWf,dRf,dbf", "p = {} loss = 0 for t in range(time_step): x = np.zeros((len_of_vocab,1))", "dog_ dig_ = i_g[t]*(1-i_g[t])*dig dWi += np.outer(dig_,x) dRi += np.outer(dig_,ys[t-1]) dy_i = np.dot(Ri.T,dig_)", "vocab = set(dataset) len_of_vocab = len(vocab) print('len of vocab:',len_of_vocab) char_to_idx = {char:idx for", "lr = 1e-1 time_step = 25 mean =0.0 std =0.01 epoches = 10000", "print(''.join([idx_to_char[c] for c in idx])) def forward_backward_pass(i,o,y_p,c_p): cs = {} ys = {}", "output = [char_to_idx[c] for c in dataset[start_ptr+1:start_ptr+time_step+1]] loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,y_prev,c_prev=forward_backward_pass(i=input,o=output,y_p=y_prev,c_p=c_prev) for params,dparams,mparams in zip([Wi,Ri,bi,Wo,Ro,bo,Wf,Rf,bf,Wz,Rz,bz,Wy,by],\\ [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby],[mWi,mRi,mbi,mWo,mRo,mbo,mWf,mRf,mbf,mWz,mRz,mbz,mWy,mby]):", "np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) dWo,dRo,dbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) dWf,dRf,dbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) dWz,dRz,dbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) dWy,dby = np.zeros_like(Wy),np.zeros_like(by)", "np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) mWo,mRo,mbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) mWf,mRf,mbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) mWz,mRz,mbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) mWy,mby = np.zeros_like(Wy),np.zeros_like(by)", "idx])) def forward_backward_pass(i,o,y_p,c_p): cs = {} ys = {} i_g = {} o_g", "= {} loss = 0 for t in range(time_step): x = np.zeros((len_of_vocab,1)) x[i[t],0]", "= np.dot(Wy,do) dy = dy + dy_z + dy_f + dy_i + dy_o", 
"+= np.outer(do,ys[t]) dby += do dy = np.dot(Wy,do) dy = dy + dy_z", "1e-1 time_step = 25 mean =0.0 std =0.01 epoches = 10000 def sigmoid(x):", "= sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,ys[t-1])+bf f_g[t] = sigmoid(F) Z = np.dot(Wz,x)", "+= np.outer(dfg_,x) dRf += np.outer(dfg_,ys[t-1]) dy_f = np.dot(Rf.T,dfg_) dbf += dfg_ dog_ =", "y_prev = np.zeros((len_of_vocab,1)) else: input = [char_to_idx[c] for c in dataset[start_ptr:start_ptr+time_step]] output =", "dfg_ dog_ = o_g[t]*(1-o_g[t])*dog dWo += np.outer(dog_,x) dRo += np.outer(dog_,ys[t-1]) dy_o = np.dot(Ro.T,dog_)", "f_g *c_p y_p = o_g * np.tanh(c_p) os = np.dot(Wy,y_p)+by p = softmax(os)", "dy_z = np.dot(Rz.T,dzg_) dfg_ = f_g[t] * (1-f_g[t])*dfg dWf += np.outer(dfg_,x) dRf +=", "dig_ for param in [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz]: np.clip(param,-1,1,out=param) return loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,ys[time_step-1],cs[time_step-1] y_prev,c_prev = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) n =", "in dataset[start_ptr:start_ptr+time_step]] output = [char_to_idx[c] for c in dataset[start_ptr+1:start_ptr+time_step+1]] loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,y_prev,c_prev=forward_backward_pass(i=input,o=output,y_p=y_prev,c_p=c_prev) for params,dparams,mparams in", "+ dy_i + dy_o dcs = o_g[t] * (1-np.tanh(cs[t])*np.tanh(cs[t]))*dy + dcs_c dcs_c =", "c_p = i_g*z_g + f_g *c_p y_p = o_g * np.tanh(c_p) os =", "for idx,char in enumerate(vocab)} idx_to_char = {idx:char for idx,char in enumerate(vocab)} print('char_to_idx:',char_to_idx) print('idx_to_char:',idx_to_char)", "= [char_to_idx[c] for c in dataset[start_ptr+1:start_ptr+time_step+1]] loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,y_prev,c_prev=forward_backward_pass(i=input,o=output,y_p=y_prev,c_p=c_prev) for params,dparams,mparams in zip([Wi,Ri,bi,Wo,Ro,bo,Wf,Rf,bf,Wz,Rz,bz,Wy,by],\\ [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby],[mWi,mRi,mbi,mWo,mRo,mbo,mWf,mRf,mbf,mWz,mRz,mbz,mWy,mby]): mparams", "start_ptr+time_step>len_of_dataset: start_ptr = 0 y_prev = np.zeros((len_of_vocab,1)) else: input = [char_to_idx[c] for c", "np.outer(dzg_,ys[t-1]) dbz += dzg_ dy_z = np.dot(Rz.T,dzg_) dfg_ = f_g[t] * (1-f_g[t])*dfg dWf", "animation plt.ion() dataset = open('../data/input.txt','r').read() #dataset = open('../data/code.txt','r').read() len_of_dataset = len(dataset) print('len of", "np.dot(Wo,x)+np.dot(Ro,ys[t-1])+bo o_g[t] = sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,ys[t-1])+bf f_g[t] = sigmoid(F) Z", "= np.zeros_like(Wy),np.zeros_like(by) dy_z,dy_f,dy_o,dy_i = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) dcs_c = np.zeros((len_of_vocab,1)) for t in reversed(range(time_step)): x", "= (1-z_g[t]*z_g[t])*dzg dWz += np.outer(dzg_,x) dRz += np.outer(dzg_,ys[t-1]) dbz += dzg_ dy_z =", "{} f_g = {} z_g = {} os = {} cs[-1] = np.copy(c_p)", "set(dataset) len_of_vocab = len(vocab) print('len of vocab:',len_of_vocab) char_to_idx = {char:idx for idx,char in", "std =0.01 epoches = 10000 def sigmoid(x): return 1/(1+np.exp(-x)) def softmax(x): return np.exp(x)/np.sum(np.exp(x))", "np.outer(dog_,x) dRo += np.outer(dog_,ys[t-1]) dy_o = np.dot(Ro.T,dog_) dbo += dog_ dig_ = i_g[t]*(1-i_g[t])*dig", "= np.copy(c_p) ys[-1] = np.copy(y_p) p = {} loss = 0 for t", "y.append(smooth_loss) if n%1000 == 0: print('smooth_loss:',loss) sample(y_p=y_prev,c_p=c_prev) plt.ylabel('Loss') plt.xlabel('Epoch') plt.plot(x,y,color='r') plt.pause(1e-9) n+=1 start_ptr", 
"np.zeros((len_of_vocab,1)) x[i[t],0] = 1 do = np.copy(p[t]) do[o[t],0] -= 1 dWy += np.outer(do,ys[t])", "loss += -np.log(p[t][o[t],0]) dWi,dRi,dbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) dWo,dRo,dbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) dWf,dRf,dbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) dWz,dRz,dbz", "do dy = np.dot(Wy,do) dy = dy + dy_z + dy_f + dy_i", "+= dzg_ dy_z = np.dot(Rz.T,dzg_) dfg_ = f_g[t] * (1-f_g[t])*dfg dWf += np.outer(dfg_,x)", "o_g[t] * np.tanh(cs[t]) os[t] = np.dot(Wy,ys[t])+by p[t] = softmax(os[t]) loss += -np.log(p[t][o[t],0]) dWi,dRi,dbi", "= np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) dWz,dRz,dbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) dWy,dby = np.zeros_like(Wy),np.zeros_like(by) dy_z,dy_f,dy_o,dy_i = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) dcs_c =", "+= dparams*dparams params += -lr*dparams/np.sqrt(mparams+1e-8) smooth_loss = (0.999*smooth_loss)+(0.001*loss) x.append(n) y.append(smooth_loss) if n%1000 ==", "np.dot(Ri.T,dig_) dbi += dig_ for param in [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz]: np.clip(param,-1,1,out=param) return loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,ys[time_step-1],cs[time_step-1] y_prev,c_prev =", "Wz,Rz,bz = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wy,by = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.zeros((len_of_vocab,1)) mWi,mRi,mbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) mWo,mRo,mbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) mWf,mRf,mbf", "{} loss = 0 for t in range(time_step): x = np.zeros((len_of_vocab,1)) x[i[t],0] =", "= np.dot(Wi,x)+np.dot(Ri,y_p)+bi i_g = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,y_p)+bo o_g = sigmoid(O) F =", "= i_g[t]*dcs dfg = cs[t-1]*dcs dzg_ = (1-z_g[t]*z_g[t])*dzg dWz += np.outer(dzg_,x) dRz +=", "reversed(range(time_step)): x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1 do = np.copy(p[t]) do[o[t],0] -= 1", "= {} i_g = {} o_g = {} f_g = {} z_g =", "= np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) dWo,dRo,dbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) dWf,dRf,dbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) dWz,dRz,dbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) dWy,dby =", "(1-f_g[t])*dfg dWf += np.outer(dfg_,x) dRf += np.outer(dfg_,ys[t-1]) dy_f = np.dot(Rf.T,dfg_) dbf += dfg_", "np.zeros((len_of_vocab,1)) for t in reversed(range(time_step)): x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1 do =", "= 1 I = np.dot(Wi,x)+np.dot(Ri,ys[t-1])+bi i_g[t] = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,ys[t-1])+bo o_g[t] =", "return np.exp(x)/np.sum(np.exp(x)) Wi,Ri,bi = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wo,Ro,bo = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wf,Rf,bf = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wz,Rz,bz =", "= 0 for t in range(time_step): x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1 I", "params,dparams,mparams in zip([Wi,Ri,bi,Wo,Ro,bo,Wf,Rf,bf,Wz,Rz,bz,Wy,by],\\ 
[dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby],[mWi,mRi,mbi,mWo,mRo,mbo,mWf,mRf,mbf,mWz,mRz,mbz,mWy,mby]): mparams += dparams*dparams params += -lr*dparams/np.sqrt(mparams+1e-8) smooth_loss = (0.999*smooth_loss)+(0.001*loss)", "dWf,dRf,dbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) dWz,dRz,dbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) dWy,dby = np.zeros_like(Wy),np.zeros_like(by) dy_z,dy_f,dy_o,dy_i = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) dcs_c", "dparams*dparams params += -lr*dparams/np.sqrt(mparams+1e-8) smooth_loss = (0.999*smooth_loss)+(0.001*loss) x.append(n) y.append(smooth_loss) if n%1000 == 0:", "np.random.randint(0,len_of_vocab) for t in range(200): I = np.dot(Wi,x)+np.dot(Ri,y_p)+bi i_g = sigmoid(I) O =", "* (1-f_g[t])*dfg dWf += np.outer(dfg_,x) dRf += np.outer(dfg_,ys[t-1]) dy_f = np.dot(Rf.T,dfg_) dbf +=", "np.zeros((len_of_vocab,1)) else: input = [char_to_idx[c] for c in dataset[start_ptr:start_ptr+time_step]] output = [char_to_idx[c] for", "= open('../data/code.txt','r').read() len_of_dataset = len(dataset) print('len of dataset:',len_of_dataset) vocab = set(dataset) len_of_vocab =", "enumerate(vocab)} print('char_to_idx:',char_to_idx) print('idx_to_char:',idx_to_char) start_ptr = 0 lr = 1e-1 time_step = 25 mean", "i_g = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,y_p)+bo o_g = sigmoid(O) F = np.dot(Wf,x) +", "+ np.dot(Rf,y_p)+bf f_g = sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,y_p)+bz z_g = np.tanh(Z)", "o_g[t] * (1-np.tanh(cs[t])*np.tanh(cs[t]))*dy + dcs_c dcs_c = f_g[t]*dcs dig = z_g[t]*dcs dog =", "def softmax(x): return np.exp(x)/np.sum(np.exp(x)) Wi,Ri,bi = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wo,Ro,bo = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wf,Rf,bf = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1))", "np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wo,Ro,bo = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wf,Rf,bf = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wz,Rz,bz = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wy,by = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.zeros((len_of_vocab,1))", "cs = {} ys = {} i_g = {} o_g = {} f_g", "+ dcs_c dcs_c = f_g[t]*dcs dig = z_g[t]*dcs dog = np.tanh(cs[t])*dy dzg =", "== 0: print('smooth_loss:',loss) sample(y_p=y_prev,c_p=c_prev) plt.ylabel('Loss') plt.xlabel('Epoch') plt.plot(x,y,color='r') plt.pause(1e-9) n+=1 start_ptr += time_step plt.savefig('../Performance/lstm_without_peephole.png')", "= np.copy(y_p) p = {} loss = 0 for t in range(time_step): x", "softmax(os) id = np.random.choice(len_of_vocab,1,p=p.ravel())[0] idx.append(id) x = np.zeros((len_of_vocab,1)) x[id,0]=1 print(''.join([idx_to_char[c] for c in", "dataset[start_ptr:start_ptr+time_step]] output = 
[char_to_idx[c] for c in dataset[start_ptr+1:start_ptr+time_step+1]] loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,y_prev,c_prev=forward_backward_pass(i=input,o=output,y_p=y_prev,c_p=c_prev) for params,dparams,mparams in zip([Wi,Ri,bi,Wo,Ro,bo,Wf,Rf,bf,Wz,Rz,bz,Wy,by],\\", "= sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,y_p)+bo o_g = sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,y_p)+bf", "loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,ys[time_step-1],cs[time_step-1] y_prev,c_prev = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) n = 0 x=[] y=[] smooth_loss = -np.log(1/len_of_vocab)*time_step while", "= np.dot(Wf,x) + np.dot(Rf,ys[t-1])+bf f_g[t] = sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,ys[t-1])+bz z_g[t]", "= sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,y_p)+bz z_g = np.tanh(Z) c_p = i_g*z_g", "= np.tanh(cs[t])*dy dzg = i_g[t]*dcs dfg = cs[t-1]*dcs dzg_ = (1-z_g[t]*z_g[t])*dzg dWz +=", "+ np.dot(Rz,ys[t-1])+bz z_g[t] = np.tanh(Z) cs[t] = i_g[t]*z_g[t] + f_g[t] *cs[t-1] ys[t] =", "f_g[t] *cs[t-1] ys[t] = o_g[t] * np.tanh(cs[t]) os[t] = np.dot(Wy,ys[t])+by p[t] = softmax(os[t])", "= np.random.randint(0,len_of_vocab) for t in range(200): I = np.dot(Wi,x)+np.dot(Ri,y_p)+bi i_g = sigmoid(I) O", "np.dot(Wo,x)+np.dot(Ro,y_p)+bo o_g = sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,y_p)+bf f_g = sigmoid(F) Z", "dy_f + dy_i + dy_o dcs = o_g[t] * (1-np.tanh(cs[t])*np.tanh(cs[t]))*dy + dcs_c dcs_c", "*c_p y_p = o_g * np.tanh(c_p) os = np.dot(Wy,y_p)+by p = softmax(os) id", "c in dataset[start_ptr:start_ptr+time_step]] output = [char_to_idx[c] for c in dataset[start_ptr+1:start_ptr+time_step+1]] loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,y_prev,c_prev=forward_backward_pass(i=input,o=output,y_p=y_prev,c_p=c_prev) for params,dparams,mparams", "for c in idx])) def forward_backward_pass(i,o,y_p,c_p): cs = {} ys = {} i_g", "n = 0 x=[] y=[] smooth_loss = -np.log(1/len_of_vocab)*time_step while n<=epoches: if start_ptr+time_step>len_of_dataset: start_ptr", "dcs_c dcs_c = f_g[t]*dcs dig = z_g[t]*dcs dog = np.tanh(cs[t])*dy dzg = i_g[t]*dcs", "= np.zeros((len_of_vocab,1)) x[i[t],0] = 1 do = np.copy(p[t]) do[o[t],0] -= 1 dWy +=", "=0.01 epoches = 10000 def sigmoid(x): return 1/(1+np.exp(-x)) def softmax(x): return np.exp(x)/np.sum(np.exp(x)) Wi,Ri,bi", "dy_i + dy_o dcs = o_g[t] * (1-np.tanh(cs[t])*np.tanh(cs[t]))*dy + dcs_c dcs_c = f_g[t]*dcs", "= z_g[t]*dcs dog = np.tanh(cs[t])*dy dzg = i_g[t]*dcs dfg = cs[t-1]*dcs dzg_ =", "[char_to_idx[c] for c in dataset[start_ptr:start_ptr+time_step]] output = [char_to_idx[c] for c in dataset[start_ptr+1:start_ptr+time_step+1]] loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,y_prev,c_prev=forward_backward_pass(i=input,o=output,y_p=y_prev,c_p=c_prev)", "print('idx_to_char:',idx_to_char) start_ptr = 0 lr = 1e-1 time_step = 25 mean =0.0 std", "idx,char in enumerate(vocab)} idx_to_char = {idx:char for idx,char in enumerate(vocab)} print('char_to_idx:',char_to_idx) print('idx_to_char:',idx_to_char) start_ptr", "= sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,y_p)+bf f_g = sigmoid(F) Z = np.dot(Wz,x)", "epoches = 10000 def sigmoid(x): return 1/(1+np.exp(-x)) def softmax(x): return np.exp(x)/np.sum(np.exp(x)) Wi,Ri,bi =", "-lr*dparams/np.sqrt(mparams+1e-8) smooth_loss = (0.999*smooth_loss)+(0.001*loss) x.append(n) y.append(smooth_loss) if n%1000 == 0: print('smooth_loss:',loss) sample(y_p=y_prev,c_p=c_prev) plt.ylabel('Loss')", "time_step = 25 mean =0.0 std =0.01 epoches = 10000 def sigmoid(x): return", "forward_backward_pass(i,o,y_p,c_p): cs = 
{} ys = {} i_g = {} o_g = {}", "+= dog_ dig_ = i_g[t]*(1-i_g[t])*dig dWi += np.outer(dig_,x) dRi += np.outer(dig_,ys[t-1]) dy_i =", "{} o_g = {} f_g = {} z_g = {} os = {}", "dWi += np.outer(dig_,x) dRi += np.outer(dig_,ys[t-1]) dy_i = np.dot(Ri.T,dig_) dbi += dig_ for", "mWo,mRo,mbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) mWf,mRf,mbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) mWz,mRz,mbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) mWy,mby = np.zeros_like(Wy),np.zeros_like(by) def", "dy_f = np.dot(Rf.T,dfg_) dbf += dfg_ dog_ = o_g[t]*(1-o_g[t])*dog dWo += np.outer(dog_,x) dRo", "of vocab:',len_of_vocab) char_to_idx = {char:idx for idx,char in enumerate(vocab)} idx_to_char = {idx:char for", "= 10000 def sigmoid(x): return 1/(1+np.exp(-x)) def softmax(x): return np.exp(x)/np.sum(np.exp(x)) Wi,Ri,bi = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1))", "x[10,0] = np.random.randint(0,len_of_vocab) for t in range(200): I = np.dot(Wi,x)+np.dot(Ri,y_p)+bi i_g = sigmoid(I)", "= np.dot(Wf,x) + np.dot(Rf,y_p)+bf f_g = sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,y_p)+bz z_g", "np.zeros_like(Wy),np.zeros_like(by) dy_z,dy_f,dy_o,dy_i = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) dcs_c = np.zeros((len_of_vocab,1)) for t in reversed(range(time_step)): x =", "i_g[t]*dcs dfg = cs[t-1]*dcs dzg_ = (1-z_g[t]*z_g[t])*dzg dWz += np.outer(dzg_,x) dRz += np.outer(dzg_,ys[t-1])", "as np import matplotlib.pyplot as plt import matplotlib.animation as animation plt.ion() dataset =", "I = np.dot(Wi,x)+np.dot(Ri,ys[t-1])+bi i_g[t] = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,ys[t-1])+bo o_g[t] = sigmoid(O) F", "= f_g[t]*dcs dig = z_g[t]*dcs dog = np.tanh(cs[t])*dy dzg = i_g[t]*dcs dfg =", "= np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) mWy,mby = np.zeros_like(Wy),np.zeros_like(by) def sample(y_p,c_p): idx = [] x = np.zeros((len_of_vocab,1))", "import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation plt.ion()", "= 1e-1 time_step = 25 mean =0.0 std =0.01 epoches = 10000 def", "dy_z + dy_f + dy_i + dy_o dcs = o_g[t] * (1-np.tanh(cs[t])*np.tanh(cs[t]))*dy +", "= set(dataset) len_of_vocab = len(vocab) print('len of vocab:',len_of_vocab) char_to_idx = {char:idx for idx,char", "np.dot(Rz,y_p)+bz z_g = np.tanh(Z) c_p = i_g*z_g + f_g *c_p y_p = o_g", "= o_g[t] * (1-np.tanh(cs[t])*np.tanh(cs[t]))*dy + dcs_c dcs_c = f_g[t]*dcs dig = z_g[t]*dcs dog", "= len(vocab) print('len of vocab:',len_of_vocab) char_to_idx = {char:idx for idx,char in enumerate(vocab)} idx_to_char", "= np.zeros((len_of_vocab,1)) x[10,0] = np.random.randint(0,len_of_vocab) for t in range(200): I = np.dot(Wi,x)+np.dot(Ri,y_p)+bi i_g", "idx_to_char = {idx:char for idx,char in enumerate(vocab)} print('char_to_idx:',char_to_idx) print('idx_to_char:',idx_to_char) start_ptr = 0 lr", "= np.zeros((len_of_vocab,1)) x[id,0]=1 print(''.join([idx_to_char[c] for c in idx])) def forward_backward_pass(i,o,y_p,c_p): cs = {}", "+ dy_z + dy_f + dy_i + dy_o dcs = o_g[t] * (1-np.tanh(cs[t])*np.tanh(cs[t]))*dy", "np.copy(p[t]) do[o[t],0] -= 1 dWy += np.outer(do,ys[t]) dby += do dy = np.dot(Wy,do)", "cs[-1] = np.copy(c_p) ys[-1] = np.copy(y_p) p = {} loss = 0 for", "for t in reversed(range(time_step)): x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1 do = np.copy(p[t])", "= np.zeros((len_of_vocab,1)) x[i[t],0] = 1 I = 
np.dot(Wi,x)+np.dot(Ri,ys[t-1])+bi i_g[t] = sigmoid(I) O =", "x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1 I = np.dot(Wi,x)+np.dot(Ri,ys[t-1])+bi i_g[t] = sigmoid(I) O", "np.dot(Wi,x)+np.dot(Ri,ys[t-1])+bi i_g[t] = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,ys[t-1])+bo o_g[t] = sigmoid(O) F = np.dot(Wf,x)", "in dataset[start_ptr+1:start_ptr+time_step+1]] loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,y_prev,c_prev=forward_backward_pass(i=input,o=output,y_p=y_prev,c_p=c_prev) for params,dparams,mparams in zip([Wi,Ri,bi,Wo,Ro,bo,Wf,Rf,bf,Wz,Rz,bz,Wy,by],\\ [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby],[mWi,mRi,mbi,mWo,mRo,mbo,mWf,mRf,mbf,mWz,mRz,mbz,mWy,mby]): mparams += dparams*dparams params +=", "dby += do dy = np.dot(Wy,do) dy = dy + dy_z + dy_f", "= np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) dcs_c = np.zeros((len_of_vocab,1)) for t in reversed(range(time_step)): x = np.zeros((len_of_vocab,1)) x[i[t],0]", "= np.zeros((len_of_vocab,1)) for t in reversed(range(time_step)): x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1 do", "x=[] y=[] smooth_loss = -np.log(1/len_of_vocab)*time_step while n<=epoches: if start_ptr+time_step>len_of_dataset: start_ptr = 0 y_prev", "dWy += np.outer(do,ys[t]) dby += do dy = np.dot(Wy,do) dy = dy +", "f_g[t]*dcs dig = z_g[t]*dcs dog = np.tanh(cs[t])*dy dzg = i_g[t]*dcs dfg = cs[t-1]*dcs", "= softmax(os) id = np.random.choice(len_of_vocab,1,p=p.ravel())[0] idx.append(id) x = np.zeros((len_of_vocab,1)) x[id,0]=1 print(''.join([idx_to_char[c] for c", "ys[-1] = np.copy(y_p) p = {} loss = 0 for t in range(time_step):", "sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,ys[t-1])+bz z_g[t] = np.tanh(Z) cs[t] = i_g[t]*z_g[t] +", "range(200): I = np.dot(Wi,x)+np.dot(Ri,y_p)+bi i_g = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,y_p)+bo o_g = sigmoid(O)", "[char_to_idx[c] for c in dataset[start_ptr+1:start_ptr+time_step+1]] loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,y_prev,c_prev=forward_backward_pass(i=input,o=output,y_p=y_prev,c_p=c_prev) for params,dparams,mparams in zip([Wi,Ri,bi,Wo,Ro,bo,Wf,Rf,bf,Wz,Rz,bz,Wy,by],\\ [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby],[mWi,mRi,mbi,mWo,mRo,mbo,mWf,mRf,mbf,mWz,mRz,mbz,mWy,mby]): mparams +=", "= np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wo,Ro,bo = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wf,Rf,bf = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wz,Rz,bz = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wy,by =", "= 0 lr = 1e-1 time_step = 25 mean =0.0 std =0.01 epoches", "open('../data/input.txt','r').read() #dataset = open('../data/code.txt','r').read() len_of_dataset = len(dataset) print('len of dataset:',len_of_dataset) vocab = set(dataset)", "for c in dataset[start_ptr:start_ptr+time_step]] output = [char_to_idx[c] for c in dataset[start_ptr+1:start_ptr+time_step+1]] loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,y_prev,c_prev=forward_backward_pass(i=input,o=output,y_p=y_prev,c_p=c_prev) for", "= np.dot(Wo,x)+np.dot(Ro,y_p)+bo o_g = sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,y_p)+bf f_g = 
sigmoid(F)", "Wf,Rf,bf = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wz,Rz,bz = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wy,by = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.zeros((len_of_vocab,1)) mWi,mRi,mbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) mWo,mRo,mbo", "x[i[t],0] = 1 I = np.dot(Wi,x)+np.dot(Ri,ys[t-1])+bi i_g[t] = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,ys[t-1])+bo o_g[t]", "param in [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz]: np.clip(param,-1,1,out=param) return loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,ys[time_step-1],cs[time_step-1] y_prev,c_prev = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) n = 0 x=[]", "os[t] = np.dot(Wy,ys[t])+by p[t] = softmax(os[t]) loss += -np.log(p[t][o[t],0]) dWi,dRi,dbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) dWo,dRo,dbo", "t in range(200): I = np.dot(Wi,x)+np.dot(Ri,y_p)+bi i_g = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,y_p)+bo o_g", "np.dot(Wf,x) + np.dot(Rf,y_p)+bf f_g = sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,y_p)+bz z_g =", "in enumerate(vocab)} idx_to_char = {idx:char for idx,char in enumerate(vocab)} print('char_to_idx:',char_to_idx) print('idx_to_char:',idx_to_char) start_ptr =", "mWi,mRi,mbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) mWo,mRo,mbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) mWf,mRf,mbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) mWz,mRz,mbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) mWy,mby", "np.tanh(cs[t])*dy dzg = i_g[t]*dcs dfg = cs[t-1]*dcs dzg_ = (1-z_g[t]*z_g[t])*dzg dWz += np.outer(dzg_,x)", "sample(y_p,c_p): idx = [] x = np.zeros((len_of_vocab,1)) x[10,0] = np.random.randint(0,len_of_vocab) for t in", "mWz,mRz,mbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) mWy,mby = np.zeros_like(Wy),np.zeros_like(by) def sample(y_p,c_p): idx = [] x =", "n<=epoches: if start_ptr+time_step>len_of_dataset: start_ptr = 0 y_prev = np.zeros((len_of_vocab,1)) else: input = [char_to_idx[c]", "np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wf,Rf,bf = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wz,Rz,bz = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wy,by = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.zeros((len_of_vocab,1)) mWi,mRi,mbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi)", "(0.999*smooth_loss)+(0.001*loss) x.append(n) y.append(smooth_loss) if n%1000 == 0: print('smooth_loss:',loss) sample(y_p=y_prev,c_p=c_prev) plt.ylabel('Loss') plt.xlabel('Epoch') plt.plot(x,y,color='r') plt.pause(1e-9)", "np.dot(Wi,x)+np.dot(Ri,y_p)+bi i_g = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,y_p)+bo o_g = sigmoid(O) F = np.dot(Wf,x)", "mWy,mby = np.zeros_like(Wy),np.zeros_like(by) def sample(y_p,c_p): idx = [] x = np.zeros((len_of_vocab,1)) x[10,0] =", "smooth_loss = (0.999*smooth_loss)+(0.001*loss) x.append(n) y.append(smooth_loss) if n%1000 == 0: print('smooth_loss:',loss) sample(y_p=y_prev,c_p=c_prev) plt.ylabel('Loss') plt.xlabel('Epoch')", "dWz += 
np.outer(dzg_,x) dRz += np.outer(dzg_,ys[t-1]) dbz += dzg_ dy_z = np.dot(Rz.T,dzg_) dfg_", "np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) dWz,dRz,dbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) dWy,dby = np.zeros_like(Wy),np.zeros_like(by) dy_z,dy_f,dy_o,dy_i = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) dcs_c = np.zeros((len_of_vocab,1))", "dbo += dog_ dig_ = i_g[t]*(1-i_g[t])*dig dWi += np.outer(dig_,x) dRi += np.outer(dig_,ys[t-1]) dy_i", "x = np.zeros((len_of_vocab,1)) x[10,0] = np.random.randint(0,len_of_vocab) for t in range(200): I = np.dot(Wi,x)+np.dot(Ri,y_p)+bi", "= {} z_g = {} os = {} cs[-1] = np.copy(c_p) ys[-1] =", "softmax(x): return np.exp(x)/np.sum(np.exp(x)) Wi,Ri,bi = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wo,Ro,bo = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wf,Rf,bf = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wz,Rz,bz", "params += -lr*dparams/np.sqrt(mparams+1e-8) smooth_loss = (0.999*smooth_loss)+(0.001*loss) x.append(n) y.append(smooth_loss) if n%1000 == 0: print('smooth_loss:',loss)", "of dataset:',len_of_dataset) vocab = set(dataset) len_of_vocab = len(vocab) print('len of vocab:',len_of_vocab) char_to_idx =", "F = np.dot(Wf,x) + np.dot(Rf,y_p)+bf f_g = sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,y_p)+bz", "np.exp(x)/np.sum(np.exp(x)) Wi,Ri,bi = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wo,Ro,bo = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wf,Rf,bf = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wz,Rz,bz = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1))", "sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,ys[t-1])+bf f_g[t] = sigmoid(F) Z = np.dot(Wz,x) +", "= np.dot(Wo,x)+np.dot(Ro,ys[t-1])+bo o_g[t] = sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,ys[t-1])+bf f_g[t] = sigmoid(F)", "= i_g[t]*z_g[t] + f_g[t] *cs[t-1] ys[t] = o_g[t] * np.tanh(cs[t]) os[t] = np.dot(Wy,ys[t])+by", "start_ptr = 0 lr = 1e-1 time_step = 25 mean =0.0 std =0.01", "dog = np.tanh(cs[t])*dy dzg = i_g[t]*dcs dfg = cs[t-1]*dcs dzg_ = (1-z_g[t]*z_g[t])*dzg dWz", "[dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz]: np.clip(param,-1,1,out=param) return loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,ys[time_step-1],cs[time_step-1] y_prev,c_prev = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) n = 0 x=[] y=[] smooth_loss", "z_g[t]*dcs dog = np.tanh(cs[t])*dy dzg = i_g[t]*dcs dfg = cs[t-1]*dcs dzg_ = (1-z_g[t]*z_g[t])*dzg", "<filename>code/lstm_without_peephole.py import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation", "dbf += dfg_ dog_ = o_g[t]*(1-o_g[t])*dog dWo += np.outer(dog_,x) dRo += np.outer(dog_,ys[t-1]) dy_o", "idx = [] x = np.zeros((len_of_vocab,1)) x[10,0] = np.random.randint(0,len_of_vocab) for t in 
range(200):", "= np.dot(Wi,x)+np.dot(Ri,ys[t-1])+bi i_g[t] = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,ys[t-1])+bo o_g[t] = sigmoid(O) F =", "dzg_ dy_z = np.dot(Rz.T,dzg_) dfg_ = f_g[t] * (1-f_g[t])*dfg dWf += np.outer(dfg_,x) dRf", "+= -lr*dparams/np.sqrt(mparams+1e-8) smooth_loss = (0.999*smooth_loss)+(0.001*loss) x.append(n) y.append(smooth_loss) if n%1000 == 0: print('smooth_loss:',loss) sample(y_p=y_prev,c_p=c_prev)", "= {} o_g = {} f_g = {} z_g = {} os =", "+= np.outer(dzg_,x) dRz += np.outer(dzg_,ys[t-1]) dbz += dzg_ dy_z = np.dot(Rz.T,dzg_) dfg_ =", "i_g = {} o_g = {} f_g = {} z_g = {} os", "for t in range(time_step): x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1 I = np.dot(Wi,x)+np.dot(Ri,ys[t-1])+bi", "np.random.choice(len_of_vocab,1,p=p.ravel())[0] idx.append(id) x = np.zeros((len_of_vocab,1)) x[id,0]=1 print(''.join([idx_to_char[c] for c in idx])) def forward_backward_pass(i,o,y_p,c_p):", "f_g = sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,y_p)+bz z_g = np.tanh(Z) c_p =", "= softmax(os[t]) loss += -np.log(p[t][o[t],0]) dWi,dRi,dbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) dWo,dRo,dbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) dWf,dRf,dbf =", "dataset[start_ptr+1:start_ptr+time_step+1]] loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,y_prev,c_prev=forward_backward_pass(i=input,o=output,y_p=y_prev,c_p=c_prev) for params,dparams,mparams in zip([Wi,Ri,bi,Wo,Ro,bo,Wf,Rf,bf,Wz,Rz,bz,Wy,by],\\ [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby],[mWi,mRi,mbi,mWo,mRo,mbo,mWf,mRf,mbf,mWz,mRz,mbz,mWy,mby]): mparams += dparams*dparams params += -lr*dparams/np.sqrt(mparams+1e-8)", "mean =0.0 std =0.01 epoches = 10000 def sigmoid(x): return 1/(1+np.exp(-x)) def softmax(x):", "t in reversed(range(time_step)): x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1 do = np.copy(p[t]) do[o[t],0]", "def forward_backward_pass(i,o,y_p,c_p): cs = {} ys = {} i_g = {} o_g =", "do = np.copy(p[t]) do[o[t],0] -= 1 dWy += np.outer(do,ys[t]) dby += do dy", "z_g = np.tanh(Z) c_p = i_g*z_g + f_g *c_p y_p = o_g *", "dbz += dzg_ dy_z = np.dot(Rz.T,dzg_) dfg_ = f_g[t] * (1-f_g[t])*dfg dWf +=", "np.outer(dfg_,x) dRf += np.outer(dfg_,ys[t-1]) dy_f = np.dot(Rf.T,dfg_) dbf += dfg_ dog_ = o_g[t]*(1-o_g[t])*dog", "dy_o dcs = o_g[t] * (1-np.tanh(cs[t])*np.tanh(cs[t]))*dy + dcs_c dcs_c = f_g[t]*dcs dig =", "Z = np.dot(Wz,x) + np.dot(Rz,ys[t-1])+bz z_g[t] = np.tanh(Z) cs[t] = i_g[t]*z_g[t] + f_g[t]", "Z = np.dot(Wz,x) + np.dot(Rz,y_p)+bz z_g = np.tanh(Z) c_p = i_g*z_g + f_g", "np.zeros((len_of_vocab,1)) x[i[t],0] = 1 I = np.dot(Wi,x)+np.dot(Ri,ys[t-1])+bi i_g[t] = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,ys[t-1])+bo", "0 lr = 1e-1 time_step = 25 mean =0.0 std =0.01 epoches =", "softmax(os[t]) loss += -np.log(p[t][o[t],0]) dWi,dRi,dbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) dWo,dRo,dbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) dWf,dRf,dbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf)", "do[o[t],0] -= 1 dWy += np.outer(do,ys[t]) dby += do dy = np.dot(Wy,do) dy", "ys = {} i_g = {} o_g = {} f_g = {} z_g", "= np.dot(Rf.T,dfg_) dbf += dfg_ dog_ = o_g[t]*(1-o_g[t])*dog dWo += np.outer(dog_,x) dRo +=", "os = np.dot(Wy,y_p)+by p = softmax(os) id = np.random.choice(len_of_vocab,1,p=p.ravel())[0] idx.append(id) x = np.zeros((len_of_vocab,1))", "np.tanh(c_p) os = np.dot(Wy,y_p)+by p = softmax(os) id = np.random.choice(len_of_vocab,1,p=p.ravel())[0] idx.append(id) x =", "= len(dataset) print('len of dataset:',len_of_dataset) vocab = set(dataset) len_of_vocab = len(vocab) 
print('len of", "= np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.zeros((len_of_vocab,1)) mWi,mRi,mbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) mWo,mRo,mbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) mWf,mRf,mbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) mWz,mRz,mbz =", "dataset = open('../data/input.txt','r').read() #dataset = open('../data/code.txt','r').read() len_of_dataset = len(dataset) print('len of dataset:',len_of_dataset) vocab", "= np.zeros((len_of_vocab,1)) else: input = [char_to_idx[c] for c in dataset[start_ptr:start_ptr+time_step]] output = [char_to_idx[c]", "in zip([Wi,Ri,bi,Wo,Ro,bo,Wf,Rf,bf,Wz,Rz,bz,Wy,by],\\ [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby],[mWi,mRi,mbi,mWo,mRo,mbo,mWf,mRf,mbf,mWz,mRz,mbz,mWy,mby]): mparams += dparams*dparams params += -lr*dparams/np.sqrt(mparams+1e-8) smooth_loss = (0.999*smooth_loss)+(0.001*loss) x.append(n)", "= {} ys = {} i_g = {} o_g = {} f_g =", "def sample(y_p,c_p): idx = [] x = np.zeros((len_of_vocab,1)) x[10,0] = np.random.randint(0,len_of_vocab) for t", "np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) dWy,dby = np.zeros_like(Wy),np.zeros_like(by) dy_z,dy_f,dy_o,dy_i = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) dcs_c = np.zeros((len_of_vocab,1)) for t in", "sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,y_p)+bf f_g = sigmoid(F) Z = np.dot(Wz,x) +", "matplotlib.pyplot as plt import matplotlib.animation as animation plt.ion() dataset = open('../data/input.txt','r').read() #dataset =", "f_g = {} z_g = {} os = {} cs[-1] = np.copy(c_p) ys[-1]", "+ f_g[t] *cs[t-1] ys[t] = o_g[t] * np.tanh(cs[t]) os[t] = np.dot(Wy,ys[t])+by p[t] =", "= np.tanh(Z) c_p = i_g*z_g + f_g *c_p y_p = o_g * np.tanh(c_p)", "= np.dot(Ro.T,dog_) dbo += dog_ dig_ = i_g[t]*(1-i_g[t])*dig dWi += np.outer(dig_,x) dRi +=", "=0.0 std =0.01 epoches = 10000 def sigmoid(x): return 1/(1+np.exp(-x)) def softmax(x): return", "= [char_to_idx[c] for c in dataset[start_ptr:start_ptr+time_step]] output = [char_to_idx[c] for c in dataset[start_ptr+1:start_ptr+time_step+1]]", "plt.ion() dataset = open('../data/input.txt','r').read() #dataset = open('../data/code.txt','r').read() len_of_dataset = len(dataset) print('len of dataset:',len_of_dataset)", "-np.log(p[t][o[t],0]) dWi,dRi,dbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) dWo,dRo,dbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) dWf,dRf,dbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) dWz,dRz,dbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz)", "c in idx])) def forward_backward_pass(i,o,y_p,c_p): cs = {} ys = {} i_g =", "O = np.dot(Wo,x)+np.dot(Ro,y_p)+bo o_g = sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,y_p)+bf f_g =", "= 25 mean =0.0 std =0.01 epoches = 10000 def sigmoid(x): return 1/(1+np.exp(-x))", "= {} os = {} cs[-1] = np.copy(c_p) ys[-1] = np.copy(y_p) p =", "{idx:char for idx,char in enumerate(vocab)} print('char_to_idx:',char_to_idx) print('idx_to_char:',idx_to_char) start_ptr = 0 lr = 1e-1", "dWo,dRo,dbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) dWf,dRf,dbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) dWz,dRz,dbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) dWy,dby = np.zeros_like(Wy),np.zeros_like(by) dy_z,dy_f,dy_o,dy_i", "= (0.999*smooth_loss)+(0.001*loss) x.append(n) y.append(smooth_loss) if n%1000 == 0: print('smooth_loss:',loss) sample(y_p=y_prev,c_p=c_prev) plt.ylabel('Loss') plt.xlabel('Epoch') 
plt.plot(x,y,color='r')", "if n%1000 == 0: print('smooth_loss:',loss) sample(y_p=y_prev,c_p=c_prev) plt.ylabel('Loss') plt.xlabel('Epoch') plt.plot(x,y,color='r') plt.pause(1e-9) n+=1 start_ptr +=", "np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) mWf,mRf,mbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) mWz,mRz,mbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) mWy,mby = np.zeros_like(Wy),np.zeros_like(by) def sample(y_p,c_p): idx", "10000 def sigmoid(x): return 1/(1+np.exp(-x)) def softmax(x): return np.exp(x)/np.sum(np.exp(x)) Wi,Ri,bi = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wo,Ro,bo", "{} i_g = {} o_g = {} f_g = {} z_g = {}", "o_g[t] = sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,ys[t-1])+bf f_g[t] = sigmoid(F) Z =", "np.dot(Rf,y_p)+bf f_g = sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,y_p)+bz z_g = np.tanh(Z) c_p", "np.tanh(Z) c_p = i_g*z_g + f_g *c_p y_p = o_g * np.tanh(c_p) os", "as animation plt.ion() dataset = open('../data/input.txt','r').read() #dataset = open('../data/code.txt','r').read() len_of_dataset = len(dataset) print('len", "(1-np.tanh(cs[t])*np.tanh(cs[t]))*dy + dcs_c dcs_c = f_g[t]*dcs dig = z_g[t]*dcs dog = np.tanh(cs[t])*dy dzg", "np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) mWy,mby = np.zeros_like(Wy),np.zeros_like(by) def sample(y_p,c_p): idx = [] x = np.zeros((len_of_vocab,1)) x[10,0]", "0 y_prev = np.zeros((len_of_vocab,1)) else: input = [char_to_idx[c] for c in dataset[start_ptr:start_ptr+time_step]] output", "-= 1 dWy += np.outer(do,ys[t]) dby += do dy = np.dot(Wy,do) dy =", "sigmoid(x): return 1/(1+np.exp(-x)) def softmax(x): return np.exp(x)/np.sum(np.exp(x)) Wi,Ri,bi = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wo,Ro,bo = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1))", "25 mean =0.0 std =0.01 epoches = 10000 def sigmoid(x): return 1/(1+np.exp(-x)) def", "np.copy(c_p) ys[-1] = np.copy(y_p) p = {} loss = 0 for t in", "enumerate(vocab)} idx_to_char = {idx:char for idx,char in enumerate(vocab)} print('char_to_idx:',char_to_idx) print('idx_to_char:',idx_to_char) start_ptr = 0", "dcs = o_g[t] * (1-np.tanh(cs[t])*np.tanh(cs[t]))*dy + dcs_c dcs_c = f_g[t]*dcs dig = z_g[t]*dcs", "np.dot(Wz,x) + np.dot(Rz,y_p)+bz z_g = np.tanh(Z) c_p = i_g*z_g + f_g *c_p y_p", "sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,ys[t-1])+bo o_g[t] = sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,ys[t-1])+bf f_g[t]", "dWy,dby = np.zeros_like(Wy),np.zeros_like(by) dy_z,dy_f,dy_o,dy_i = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) dcs_c = np.zeros((len_of_vocab,1)) for t in reversed(range(time_step)):", "z_g[t] = np.tanh(Z) cs[t] = i_g[t]*z_g[t] + f_g[t] *cs[t-1] ys[t] = o_g[t] *", "else: input = [char_to_idx[c] for c in dataset[start_ptr:start_ptr+time_step]] output = [char_to_idx[c] for c", "np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) dcs_c = np.zeros((len_of_vocab,1)) for t in reversed(range(time_step)): x = np.zeros((len_of_vocab,1)) x[i[t],0] =", "dy = dy + dy_z + dy_f + dy_i + dy_o dcs =", "dRo += np.outer(dog_,ys[t-1]) dy_o = np.dot(Ro.T,dog_) dbo += dog_ dig_ = i_g[t]*(1-i_g[t])*dig dWi", "= 
np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wy,by = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.zeros((len_of_vocab,1)) mWi,mRi,mbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) mWo,mRo,mbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) mWf,mRf,mbf =", "= o_g[t] * np.tanh(cs[t]) os[t] = np.dot(Wy,ys[t])+by p[t] = softmax(os[t]) loss += -np.log(p[t][o[t],0])", "O = np.dot(Wo,x)+np.dot(Ro,ys[t-1])+bo o_g[t] = sigmoid(O) F = np.dot(Wf,x) + np.dot(Rf,ys[t-1])+bf f_g[t] =", "= cs[t-1]*dcs dzg_ = (1-z_g[t]*z_g[t])*dzg dWz += np.outer(dzg_,x) dRz += np.outer(dzg_,ys[t-1]) dbz +=", "zip([Wi,Ri,bi,Wo,Ro,bo,Wf,Rf,bf,Wz,Rz,bz,Wy,by],\\ [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby],[mWi,mRi,mbi,mWo,mRo,mbo,mWf,mRf,mbf,mWz,mRz,mbz,mWy,mby]): mparams += dparams*dparams params += -lr*dparams/np.sqrt(mparams+1e-8) smooth_loss = (0.999*smooth_loss)+(0.001*loss) x.append(n) y.append(smooth_loss)", "o_g * np.tanh(c_p) os = np.dot(Wy,y_p)+by p = softmax(os) id = np.random.choice(len_of_vocab,1,p=p.ravel())[0] idx.append(id)", "{} os = {} cs[-1] = np.copy(c_p) ys[-1] = np.copy(y_p) p = {}", "dig = z_g[t]*dcs dog = np.tanh(cs[t])*dy dzg = i_g[t]*dcs dfg = cs[t-1]*dcs dzg_", "idx,char in enumerate(vocab)} print('char_to_idx:',char_to_idx) print('idx_to_char:',idx_to_char) start_ptr = 0 lr = 1e-1 time_step =", "{} cs[-1] = np.copy(c_p) ys[-1] = np.copy(y_p) p = {} loss = 0", "np.zeros((len_of_vocab,1)) x[id,0]=1 print(''.join([idx_to_char[c] for c in idx])) def forward_backward_pass(i,o,y_p,c_p): cs = {} ys", "= np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wf,Rf,bf = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wz,Rz,bz = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wy,by = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.zeros((len_of_vocab,1)) mWi,mRi,mbi =", "np.outer(dog_,ys[t-1]) dy_o = np.dot(Ro.T,dog_) dbo += dog_ dig_ = i_g[t]*(1-i_g[t])*dig dWi += np.outer(dig_,x)", "np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.zeros((len_of_vocab,1)) mWi,mRi,mbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) mWo,mRo,mbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) mWf,mRf,mbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) mWz,mRz,mbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz)", "n%1000 == 0: print('smooth_loss:',loss) sample(y_p=y_prev,c_p=c_prev) plt.ylabel('Loss') plt.xlabel('Epoch') plt.plot(x,y,color='r') plt.pause(1e-9) n+=1 start_ptr += time_step", "import matplotlib.pyplot as plt import matplotlib.animation as animation plt.ion() dataset = open('../data/input.txt','r').read() #dataset", "y_prev,c_prev = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) n = 0 x=[] y=[] smooth_loss = -np.log(1/len_of_vocab)*time_step while n<=epoches:", "open('../data/code.txt','r').read() len_of_dataset = len(dataset) print('len of dataset:',len_of_dataset) vocab = set(dataset) len_of_vocab = len(vocab)", "smooth_loss = -np.log(1/len_of_vocab)*time_step while n<=epoches: if start_ptr+time_step>len_of_dataset: start_ptr = 0 y_prev = np.zeros((len_of_vocab,1))", "y_p = o_g * np.tanh(c_p) os = 
np.dot(Wy,y_p)+by p = softmax(os) id =", "= 0 y_prev = np.zeros((len_of_vocab,1)) else: input = [char_to_idx[c] for c in dataset[start_ptr:start_ptr+time_step]]", "print('len of vocab:',len_of_vocab) char_to_idx = {char:idx for idx,char in enumerate(vocab)} idx_to_char = {idx:char", "= sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,ys[t-1])+bz z_g[t] = np.tanh(Z) cs[t] = i_g[t]*z_g[t]", "i_g[t]*(1-i_g[t])*dig dWi += np.outer(dig_,x) dRi += np.outer(dig_,ys[t-1]) dy_i = np.dot(Ri.T,dig_) dbi += dig_", "np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) mWz,mRz,mbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz) mWy,mby = np.zeros_like(Wy),np.zeros_like(by) def sample(y_p,c_p): idx = [] x", "mparams += dparams*dparams params += -lr*dparams/np.sqrt(mparams+1e-8) smooth_loss = (0.999*smooth_loss)+(0.001*loss) x.append(n) y.append(smooth_loss) if n%1000", "for t in range(200): I = np.dot(Wi,x)+np.dot(Ri,y_p)+bi i_g = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,y_p)+bo", "dRf += np.outer(dfg_,ys[t-1]) dy_f = np.dot(Rf.T,dfg_) dbf += dfg_ dog_ = o_g[t]*(1-o_g[t])*dog dWo", "+= np.outer(dig_,x) dRi += np.outer(dig_,ys[t-1]) dy_i = np.dot(Ri.T,dig_) dbi += dig_ for param", "np.copy(y_p) p = {} loss = 0 for t in range(time_step): x =", "-np.log(1/len_of_vocab)*time_step while n<=epoches: if start_ptr+time_step>len_of_dataset: start_ptr = 0 y_prev = np.zeros((len_of_vocab,1)) else: input", "f_g[t] = sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,ys[t-1])+bz z_g[t] = np.tanh(Z) cs[t] =", "np.outer(do,ys[t]) dby += do dy = np.dot(Wy,do) dy = dy + dy_z +", "p = softmax(os) id = np.random.choice(len_of_vocab,1,p=p.ravel())[0] idx.append(id) x = np.zeros((len_of_vocab,1)) x[id,0]=1 print(''.join([idx_to_char[c] for", "in enumerate(vocab)} print('char_to_idx:',char_to_idx) print('idx_to_char:',idx_to_char) start_ptr = 0 lr = 1e-1 time_step = 25", "x.append(n) y.append(smooth_loss) if n%1000 == 0: print('smooth_loss:',loss) sample(y_p=y_prev,c_p=c_prev) plt.ylabel('Loss') plt.xlabel('Epoch') plt.plot(x,y,color='r') plt.pause(1e-9) n+=1", "+= np.outer(dog_,x) dRo += np.outer(dog_,ys[t-1]) dy_o = np.dot(Ro.T,dog_) dbo += dog_ dig_ =", "+= np.outer(dig_,ys[t-1]) dy_i = np.dot(Ri.T,dig_) dbi += dig_ for param in [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz]: np.clip(param,-1,1,out=param)", "loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,y_prev,c_prev=forward_backward_pass(i=input,o=output,y_p=y_prev,c_p=c_prev) for params,dparams,mparams in zip([Wi,Ri,bi,Wo,Ro,bo,Wf,Rf,bf,Wz,Rz,bz,Wy,by],\\ [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby],[mWi,mRi,mbi,mWo,mRo,mbo,mWf,mRf,mbf,mWz,mRz,mbz,mWy,mby]): mparams += dparams*dparams params += -lr*dparams/np.sqrt(mparams+1e-8) smooth_loss", "= {} cs[-1] = np.copy(c_p) ys[-1] = np.copy(y_p) p = {} loss =", "while n<=epoches: if start_ptr+time_step>len_of_dataset: start_ptr = 0 y_prev = np.zeros((len_of_vocab,1)) else: input =", "y=[] smooth_loss = -np.log(1/len_of_vocab)*time_step while n<=epoches: if start_ptr+time_step>len_of_dataset: start_ptr = 0 y_prev =", "print('char_to_idx:',char_to_idx) print('idx_to_char:',idx_to_char) start_ptr = 0 lr = 1e-1 time_step = 25 mean =0.0", "0 x=[] y=[] smooth_loss = -np.log(1/len_of_vocab)*time_step while n<=epoches: if start_ptr+time_step>len_of_dataset: start_ptr = 0", "np.tanh(cs[t]) os[t] = np.dot(Wy,ys[t])+by p[t] = softmax(os[t]) loss += -np.log(p[t][o[t],0]) dWi,dRi,dbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi)", "in idx])) def forward_backward_pass(i,o,y_p,c_p): cs = {} ys = {} i_g = {}", 
"input = [char_to_idx[c] for c in dataset[start_ptr:start_ptr+time_step]] output = [char_to_idx[c] for c in", "len(dataset) print('len of dataset:',len_of_dataset) vocab = set(dataset) len_of_vocab = len(vocab) print('len of vocab:',len_of_vocab)", "range(time_step): x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1 I = np.dot(Wi,x)+np.dot(Ri,ys[t-1])+bi i_g[t] = sigmoid(I)", "0 for t in range(time_step): x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1 I =", "= np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) n = 0 x=[] y=[] smooth_loss = -np.log(1/len_of_vocab)*time_step while n<=epoches: if", "len_of_vocab = len(vocab) print('len of vocab:',len_of_vocab) char_to_idx = {char:idx for idx,char in enumerate(vocab)}", "* np.tanh(c_p) os = np.dot(Wy,y_p)+by p = softmax(os) id = np.random.choice(len_of_vocab,1,p=p.ravel())[0] idx.append(id) x", "dy_z,dy_f,dy_o,dy_i = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) dcs_c = np.zeros((len_of_vocab,1)) for t in reversed(range(time_step)): x = np.zeros((len_of_vocab,1))", "dy + dy_z + dy_f + dy_i + dy_o dcs = o_g[t] *", "dcs_c = f_g[t]*dcs dig = z_g[t]*dcs dog = np.tanh(cs[t])*dy dzg = i_g[t]*dcs dfg", "+= dig_ for param in [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz]: np.clip(param,-1,1,out=param) return loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,ys[time_step-1],cs[time_step-1] y_prev,c_prev = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) n", "o_g[t]*(1-o_g[t])*dog dWo += np.outer(dog_,x) dRo += np.outer(dog_,ys[t-1]) dy_o = np.dot(Ro.T,dog_) dbo += dog_", "len_of_dataset = len(dataset) print('len of dataset:',len_of_dataset) vocab = set(dataset) len_of_vocab = len(vocab) print('len", "= i_g[t]*(1-i_g[t])*dig dWi += np.outer(dig_,x) dRi += np.outer(dig_,ys[t-1]) dy_i = np.dot(Ri.T,dig_) dbi +=", "+ np.dot(Rz,y_p)+bz z_g = np.tanh(Z) c_p = i_g*z_g + f_g *c_p y_p =", "dfg = cs[t-1]*dcs dzg_ = (1-z_g[t]*z_g[t])*dzg dWz += np.outer(dzg_,x) dRz += np.outer(dzg_,ys[t-1]) dbz", "dy_i = np.dot(Ri.T,dig_) dbi += dig_ for param in [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz]: np.clip(param,-1,1,out=param) return loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,ys[time_step-1],cs[time_step-1]", "= np.copy(p[t]) do[o[t],0] -= 1 dWy += np.outer(do,ys[t]) dby += do dy =", "= i_g*z_g + f_g *c_p y_p = o_g * np.tanh(c_p) os = np.dot(Wy,y_p)+by", "= np.dot(Wy,y_p)+by p = softmax(os) id = np.random.choice(len_of_vocab,1,p=p.ravel())[0] idx.append(id) x = np.zeros((len_of_vocab,1)) x[id,0]=1", "= open('../data/input.txt','r').read() #dataset = open('../data/code.txt','r').read() len_of_dataset = len(dataset) print('len of dataset:',len_of_dataset) vocab =", "np.outer(dig_,ys[t-1]) dy_i = np.dot(Ri.T,dig_) dbi += dig_ for param in [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz]: np.clip(param,-1,1,out=param) return", "* np.tanh(cs[t]) os[t] = np.dot(Wy,ys[t])+by p[t] = softmax(os[t]) loss += -np.log(p[t][o[t],0]) dWi,dRi,dbi =", "x = np.zeros((len_of_vocab,1)) x[id,0]=1 print(''.join([idx_to_char[c] for c in idx])) def forward_backward_pass(i,o,y_p,c_p): cs =", "os = {} cs[-1] = np.copy(c_p) ys[-1] = np.copy(y_p) p = {} loss", "= np.dot(Rz.T,dzg_) dfg_ = f_g[t] * (1-f_g[t])*dfg dWf += np.outer(dfg_,x) dRf += np.outer(dfg_,ys[t-1])", "in range(time_step): x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1 I = np.dot(Wi,x)+np.dot(Ri,ys[t-1])+bi i_g[t] =", 
"np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1)) Wy,by = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.zeros((len_of_vocab,1)) mWi,mRi,mbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) mWo,mRo,mbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) mWf,mRf,mbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf)", "1 I = np.dot(Wi,x)+np.dot(Ri,ys[t-1])+bi i_g[t] = sigmoid(I) O = np.dot(Wo,x)+np.dot(Ro,ys[t-1])+bo o_g[t] = sigmoid(O)", "in reversed(range(time_step)): x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1 do = np.copy(p[t]) do[o[t],0] -=", "Wy,by = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.zeros((len_of_vocab,1)) mWi,mRi,mbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi) mWo,mRo,mbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo) mWf,mRf,mbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf) mWz,mRz,mbz", "np.dot(Rf,ys[t-1])+bf f_g[t] = sigmoid(F) Z = np.dot(Wz,x) + np.dot(Rz,ys[t-1])+bz z_g[t] = np.tanh(Z) cs[t]", "loss = 0 for t in range(time_step): x = np.zeros((len_of_vocab,1)) x[i[t],0] = 1", "+= np.outer(dzg_,ys[t-1]) dbz += dzg_ dy_z = np.dot(Rz.T,dzg_) dfg_ = f_g[t] * (1-f_g[t])*dfg", "np.outer(dig_,x) dRi += np.outer(dig_,ys[t-1]) dy_i = np.dot(Ri.T,dig_) dbi += dig_ for param in", "+ dy_f + dy_i + dy_o dcs = o_g[t] * (1-np.tanh(cs[t])*np.tanh(cs[t]))*dy + dcs_c", "np.dot(Wz,x) + np.dot(Rz,ys[t-1])+bz z_g[t] = np.tanh(Z) cs[t] = i_g[t]*z_g[t] + f_g[t] *cs[t-1] ys[t]", "for param in [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz]: np.clip(param,-1,1,out=param) return loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,ys[time_step-1],cs[time_step-1] y_prev,c_prev = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)) n = 0" ]
[ "A node with no children. Branch node Internal node A node with at", "proceeding from parent to child. Also known as subchild. Ancestor A node reachable", "child) return # Searching for an item # Adding a new item at", "[] return self class Tree(object): def __init__(self, data, num_children = 2, notation =", "The top node in a tree. Child A node directly connected to another", "converse notion of a child. Siblings A group of nodes with the same", "Pruning: Removing a whole section of a tree # Grafting: Adding a whole", "return # Searching for an item # Adding a new item at a", "an item # Adding a new item at a certain position on the", "to a tree # Finding the root for any node # Finding the", "number of children. A leaf is necessarily degree zero. Edge The connection between", "node = child) return # Searching for an item # Adding a new", "reachable by repeated proceeding from child to parent. Leaf External node (not common)", "edges connecting a node with a descendant. Level The level of a node", "a tree is the height of its root node. Depth The depth of", "root. Parent The converse notion of a child. Siblings A group of nodes", "Incomplete: insert with queue. def insert(data, node = self.root): if len(node.children) < node.num_children:", "else: for child in node.children: if len(child.children) < child.num_children: Tree.insert(data, node = child)", "0 disjoint trees. ''' class Node(object): def __init__(self, data, num_children = 2): self.data", "Deleting an item # Pruning: Removing a whole section of a tree #", "The level of a node is defined as: 1 + the number of", "child in node.children: if len(child.children) < child.num_children: Tree.insert(data, node = child) return #", "Finding the lowest common ancestor of two nodes # Enumerating all the items", "+ the number of edges between the node and the root. Height of", "A node directly connected to another node when moving away from the root.", "height of a tree is the height of its root node. Depth The", "for child in node.children: if len(child.children) < child.num_children: Tree.insert(data, node = child) return", "node in a tree. Child A node directly connected to another node when", "of its root node. Depth The depth of a node is the number", "a node is the number of edges from the tree's root node to", "= self.root): if len(node.children) < node.num_children: node.children.append(Node(data, node.num_children)) else: for child in node.children:", "n ≥ 0 disjoint trees. ''' class Node(object): def __init__(self, data, num_children =", "used in trees Root The top node in a tree. Child A node", "an item # Pruning: Removing a whole section of a tree # Grafting:", "with queue. def insert(data, node = self.root): if len(node.children) < node.num_children: node.children.append(Node(data, node.num_children))", "child. Siblings A group of nodes with the same parent. Descendant A node", "of a node is the number of edges from the tree's root node", "of a child. Siblings A group of nodes with the same parent. Descendant", "Level The level of a node is defined as: 1 + the number", "class Tree(object): def __init__(self, data, num_children = 2, notation = \"prefix\"): self.root =", "trees. ''' class Node(object): def __init__(self, data, num_children = 2): self.data = data", "repeated proceeding from parent to child. Also known as subchild. Ancestor A node", "parent. Descendant A node reachable by repeated proceeding from parent to child. Also", "# Common operations: # Incomplete: insert with queue. 
def insert(data, node = self.root):", "insert(data, node = self.root): if len(node.children) < node.num_children: node.children.append(Node(data, node.num_children)) else: for child", "= num_children self.children = [] return self class Tree(object): def __init__(self, data, num_children", "node reachable by repeated proceeding from parent to child. Also known as subchild.", "notion of a child. Siblings A group of nodes with the same parent.", "For a given node, its number of children. A leaf is necessarily degree", "__init__(self, data, num_children = 2): self.data = data self.num_children = num_children self.children =", "A node reachable by repeated proceeding from child to parent. Leaf External node", "node (not common) A node with no children. Branch node Internal node A", "< child.num_children: Tree.insert(data, node = child) return # Searching for an item #", "the tree # Deleting an item # Pruning: Removing a whole section of", "tree # Finding the root for any node # Finding the lowest common", "for an item # Adding a new item at a certain position on", "disjoint trees. ''' class Node(object): def __init__(self, data, num_children = 2): self.data =", "The height of a node is the number of edges on the longest", "number of edges between the node and the root. Height of node The", "Degree For a given node, its number of children. A leaf is necessarily", "Edge The connection between one node and another. Path A sequence of nodes", "with the same parent. Descendant A node reachable by repeated proceeding from parent", "a whole section of a tree # Grafting: Adding a whole section to", "# Pruning: Removing a whole section of a tree # Grafting: Adding a", "Path A sequence of nodes and edges connecting a node with a descendant.", "if len(child.children) < child.num_children: Tree.insert(data, node = child) return # Searching for an", "node with no children. Branch node Internal node A node with at least", "node, its number of children. A leaf is necessarily degree zero. Edge The", "node Internal node A node with at least one child. Degree For a", "descendant. Level The level of a node is defined as: 1 + the", "the root for any node # Finding the lowest common ancestor of two", "a certain position on the tree # Deleting an item # Pruning: Removing", "Searching for an item # Adding a new item at a certain position", "any node # Finding the lowest common ancestor of two nodes # Enumerating", "the node and the root. Height of node The height of a node", "tree # Deleting an item # Pruning: Removing a whole section of a", "of a tree # Grafting: Adding a whole section to a tree #", "Leaf External node (not common) A node with no children. Branch node Internal", "by repeated proceeding from parent to child. Also known as subchild. Ancestor A", "self.data = data self.num_children = num_children self.children = [] return self class Tree(object):", "children. A leaf is necessarily degree zero. Edge The connection between one node", "= notation # Common operations: # Incomplete: insert with queue. def insert(data, node", "A group of nodes with the same parent. Descendant A node reachable by", "known as subchild. Ancestor A node reachable by repeated proceeding from child to", "of nodes with the same parent. Descendant A node reachable by repeated proceeding", "self.children = [] return self class Tree(object): def __init__(self, data, num_children = 2,", "and the root. 
Height of node The height of a node is the", "nodes # Enumerating all the items # Enumerating a section of a tree", "nodes with the same parent. Descendant A node reachable by repeated proceeding from", "trees Root The top node in a tree. Child A node directly connected", "connected to another node when moving away from the root. Parent The converse", "the root. Parent The converse notion of a child. Siblings A group of", "Root The top node in a tree. Child A node directly connected to", "root node to the node. Forest A forest is a set of n", "of children. A leaf is necessarily degree zero. Edge The connection between one", "node and a leaf. Height of tree The height of a tree is", "edges from the tree's root node to the node. Forest A forest is", "longest path between that node and a leaf. Height of tree The height", "len(node.children) < node.num_children: node.children.append(Node(data, node.num_children)) else: for child in node.children: if len(child.children) <", "tree's root node to the node. Forest A forest is a set of", "tree. Child A node directly connected to another node when moving away from", "Height of tree The height of a tree is the height of its", "Removing a whole section of a tree # Grafting: Adding a whole section", "of edges between the node and the root. Height of node The height", "as subchild. Ancestor A node reachable by repeated proceeding from child to parent.", "= [] return self class Tree(object): def __init__(self, data, num_children = 2, notation", "the node. Forest A forest is a set of n ≥ 0 disjoint", "def insert(data, node = self.root): if len(node.children) < node.num_children: node.children.append(Node(data, node.num_children)) else: for", "of edges from the tree's root node to the node. Forest A forest", "self.root = Node(data, num_children) self.notation = notation # Common operations: # Incomplete: insert", "subchild. Ancestor A node reachable by repeated proceeding from child to parent. Leaf", "Tree(object): def __init__(self, data, num_children = 2, notation = \"prefix\"): self.root = Node(data,", "self.num_children = num_children self.children = [] return self class Tree(object): def __init__(self, data,", "< node.num_children: node.children.append(Node(data, node.num_children)) else: for child in node.children: if len(child.children) < child.num_children:", "set of n ≥ 0 disjoint trees. ''' class Node(object): def __init__(self, data,", "the same parent. Descendant A node reachable by repeated proceeding from parent to", "to the node. Forest A forest is a set of n ≥ 0", "data self.num_children = num_children self.children = [] return self class Tree(object): def __init__(self,", "same parent. Descendant A node reachable by repeated proceeding from parent to child.", "node.children: if len(child.children) < child.num_children: Tree.insert(data, node = child) return # Searching for", "as: 1 + the number of edges between the node and the root.", "when moving away from the root. Parent The converse notion of a child.", "connection between one node and another. Path A sequence of nodes and edges", "A leaf is necessarily degree zero. Edge The connection between one node and", "# Incomplete: insert with queue. def insert(data, node = self.root): if len(node.children) <", "at least one child. Degree For a given node, its number of children.", "the height of its root node. Depth The depth of a node is", "between one node and another. Path A sequence of nodes and edges connecting", "away from the root. Parent The converse notion of a child. 
Siblings A", "a tree # Finding the root for any node # Finding the lowest", "Adding a whole section to a tree # Finding the root for any", "connecting a node with a descendant. Level The level of a node is", "between that node and a leaf. Height of tree The height of a", "The height of a tree is the height of its root node. Depth", "is the number of edges on the longest path between that node and", "return self class Tree(object): def __init__(self, data, num_children = 2, notation = \"prefix\"):", "group of nodes with the same parent. Descendant A node reachable by repeated", "node = self.root): if len(node.children) < node.num_children: node.children.append(Node(data, node.num_children)) else: for child in", "from parent to child. Also known as subchild. Ancestor A node reachable by", "node The height of a node is the number of edges on the", "node directly connected to another node when moving away from the root. Parent", "the tree's root node to the node. Forest A forest is a set", "one child. Degree For a given node, its number of children. A leaf", "edges between the node and the root. Height of node The height of", "degree zero. Edge The connection between one node and another. Path A sequence", "def __init__(self, data, num_children = 2, notation = \"prefix\"): self.root = Node(data, num_children)", "of a node is the number of edges on the longest path between", "Siblings A group of nodes with the same parent. Descendant A node reachable", "that node and a leaf. Height of tree The height of a tree", "= \"prefix\"): self.root = Node(data, num_children) self.notation = notation # Common operations: #", "A sequence of nodes and edges connecting a node with a descendant. Level", "for any node # Finding the lowest common ancestor of two nodes #", "leaf is necessarily degree zero. Edge The connection between one node and another.", "one node and another. Path A sequence of nodes and edges connecting a", "by repeated proceeding from child to parent. Leaf External node (not common) A", "in a tree. Child A node directly connected to another node when moving", "whole section to a tree # Finding the root for any node #", "class Node(object): def __init__(self, data, num_children = 2): self.data = data self.num_children =", "Finding the root for any node # Finding the lowest common ancestor of", "node with a descendant. Level The level of a node is defined as:", "proceeding from child to parent. Leaf External node (not common) A node with", "node and another. Path A sequence of nodes and edges connecting a node", "a tree # Grafting: Adding a whole section to a tree # Finding", "Adding a new item at a certain position on the tree # Deleting", "Branch node Internal node A node with at least one child. Degree For", "# Searching for an item # Adding a new item at a certain", "no children. Branch node Internal node A node with at least one child.", "is the height of its root node. Depth The depth of a node", "necessarily degree zero. Edge The connection between one node and another. Path A", "# Grafting: Adding a whole section to a tree # Finding the root", "self.notation = notation # Common operations: # Incomplete: insert with queue. def insert(data,", "node is the number of edges from the tree's root node to the", "Node(object): def __init__(self, data, num_children = 2): self.data = data self.num_children = num_children", "the longest path between that node and a leaf. Height of tree The", "of n ≥ 0 disjoint trees. 
'''
Terminology used in trees

Root
    The top node in a tree.
Child
    A node directly connected to another node when moving away from the root.
Parent
    The converse notion of a child.
Siblings
    A group of nodes with the same parent.
Descendant
    A node reachable by repeated proceeding from parent to child. Also known as subchild.
Ancestor
    A node reachable by repeated proceeding from child to parent.
Leaf / External node (not common)
    A node with no children.
Branch node / Internal node
    A node with at least one child.
Degree
    For a given node, its number of children. A leaf is necessarily degree zero.
Edge
    The connection between one node and another.
Path
    A sequence of nodes and edges connecting a node with a descendant.
Level
    The level of a node is defined as: 1 + the number of edges between the node and the root.
Height of node
    The height of a node is the number of edges on the longest path between that node and a leaf.
Height of tree
    The height of a tree is the height of its root node.
Depth
    The depth of a node is the number of edges from the tree's root node to the node.
Forest
    A forest is a set of n ≥ 0 disjoint trees.
'''
from collections import deque


class Node(object):
    def __init__(self, data, num_children=2):
        self.data = data
        self.num_children = num_children
        self.children = []


class Tree(object):
    def __init__(self, data, num_children=2, notation="prefix"):
        self.root = Node(data, num_children)
        self.notation = notation

    # Common operations:
    # Insert with a queue: walk the tree in level order and attach the new
    # node to the first node encountered that still has room for a child.
    def insert(self, data, node=None):
        queue = deque([node or self.root])
        while queue:
            current = queue.popleft()
            if len(current.children) < current.num_children:
                current.children.append(Node(data, current.num_children))
                return
            queue.extend(current.children)

    # Still to implement:
    # Searching for an item
    # Adding a new item at a certain position on the tree
    # Deleting an item
    # Pruning: Removing a whole section of a tree
    # Grafting: Adding a whole section to a tree
    # Finding the root for any node
    # Finding the lowest common ancestor of two nodes
    # Enumerating all the items
    # Enumerating a section of a tree
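# Usage sketch (illustrative only; the values below are assumptions, not part
# of the original file): with num_children=2, repeated inserts fill the tree
# level by level, left to right.
if __name__ == '__main__':
    tree = Tree(1, num_children=2)
    for value in range(2, 8):
        tree.insert(value)
    assert [c.data for c in tree.root.children] == [2, 3]
    assert [c.data for c in tree.root.children[0].children] == [4, 5]
    assert [c.data for c in tree.root.children[1].children] == [6, 7]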
<gh_stars>0
# Generated by Django 3.0.3 on 2020-05-24 21:05

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('get_random_quote', '0016_auto_20200525_0146'),
    ]

    operations = [
        migrations.RenameField(
            model_name='quote',
            old_name='photo',
            new_name='image_object',
        ),
    ]
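# For context, a minimal sketch of the model this migration implies. Only the
# rename from 'photo' to 'image_object' on the 'quote' model is visible in the
# dump; the field type and the other field here are assumptions.
from django.db import models


class Quote(models.Model):
    quote_text = models.TextField()                        # assumed field
    image_object = models.ImageField(upload_to='quotes/')  # renamed from 'photo'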
<reponame>Praneethvvs/CircleCi_FastApi<gh_stars>0
from time import sleep

from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities

# Give the remote Selenium grid a moment to come up before connecting.
sleep(5)


def screenshot_main():
    # NOTE: the grid hostname is missing from this URL in the source
    # (apparently lost in extraction); only the port survives.
    driver = webdriver.Remote("http://:4444/wd/hub",
                              desired_capabilities=DesiredCapabilities.CHROME)
    driver.get("https://python.org")
    driver.save_screenshot("screenshot.png")
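# A variant of the helper above with an explicit hostname and cleanup, as a
# sketch only: 'localhost' is an assumed grid address (the original hostname is
# elided), and driver.quit() releases the remote browser session when done.
def screenshot_with_cleanup(url="https://python.org", out="screenshot.png"):
    driver = webdriver.Remote("http://localhost:4444/wd/hub",
                              desired_capabilities=DesiredCapabilities.CHROME)
    try:
        driver.get(url)
        driver.save_screenshot(out)
    finally:
        driver.quit()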
<filename>sss/sss.py
#!/usr/bin/python3

# 2021 Collegiate eCTF
# SCEWL Security Server
# <NAME>
#
# (c) 2021 The MITRE Corporation
#
# This source file is part of an example system for MITRE's 2021 Embedded System CTF (eCTF).
# This code is being provided only for educational purposes for the 2021 MITRE eCTF competition,
# and may not meet MITRE standards for quality. Use this code at your own risk!
#
# This is the Secure SCEWL Server that handles SED registration and key distribution for any given
# deployment. Minimal changes have been made to the provided source files to allow for these
# features. It should be noted that any key generation is done within the respective dockerfiles;
# this script primarily focuses on verifying an SED as valid and distributing deployment-wide keys.
#
# Registration:
# 1) Given any SED with a valid dev_id, establish the path to the SSS registration secret and
#    the scewl_secret
# 2) Validate the scewl_secret that resides on the registering SED by comparing it to the SSS's
#    registration secret
# 3) Distribute the AES key (16B), HMAC key (64B) and random seed (32B), given a match
# 4) Send an error given a discrepancy
#
# Successful execution of this procedure means a given SED is valid and may communicate with other
# deployed SEDs through use of the aforementioned keys. If an SED doesn't receive these keys,
# its messages will be thrown out by any receiving SED which is part of the deployment.
#
# Deregistration is handled by sending a deregistration message and removing the registration
# secret from the SED (see dockerfiles/3_remove_sed.Dockerfile)

import socket
import select
import struct
import argparse
import logging
import os
import secrets
from typing import NamedTuple

SSS_IP = 'localhost'
SSS_ID = 1

# mirroring scewl enum at scewl.c:4
ALREADY, REG, DEREG = -1, 0, 1

logging.basicConfig(level=logging.INFO)

Device = NamedTuple('Device', [('id', int), ('status', int), ('csock', socket.socket)])


class SSS:
    def __init__(self, sockf):
        # Make sure the socket does not already exist
        try:
            os.unlink(sockf)
        except OSError:
            if os.path.exists(sockf):
                raise

        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sock.bind(sockf)
        self.sock.listen(10)
        self.devs = {}

    @staticmethod
    def sock_ready(sock, op='r'):
        rready, wready, _ = select.select([sock], [sock], [], 0)
        return rready if op == 'r' else wready

    def handle_transaction(self, csock: socket.SocketType):
        logging.debug('handling transaction')
        data = b''
        while len(data) < 76:
            recvd = csock.recv(76 - len(data))
            data += recvd

            # check for closed connection
            if not recvd:
                raise ConnectionResetError
        logging.debug(f'Received buffer: {repr(data)}')

        # Unpack message received from a given SED
        _, _, _, _, dev_id, op, scewl_secret = struct.unpack('<HHHHHH64s', data)

        # Message responses are constructed below
        # Read in the corresponding scewl secret
        secret_path = f'/secrets/{dev_id}_secret'
        if os.path.exists(secret_path):
            with open(secret_path, "rb") as secret_file:
                # Read in the registration secret for verification
                checked_secret = secret_file.read(64)

            # scewl_secret mismatch: the registration key provided by the SED is invalid.
            # Log this event and pack the ALREADY resp_op into the response. Without
            # deployment keys, this SED is considered invalid for registration.
            if checked_secret != scewl_secret:
                logging.info(f'{dev_id}:expected: {checked_secret}, found: {scewl_secret}')
                resp_op = ALREADY
                logging.info(f'{dev_id}:key mismatch')
                body = struct.pack('<Hh', dev_id, resp_op)
            # Requesting a repeat transaction in the case that the SED state already
            # reflects the received op. Log this event.
            elif dev_id in self.devs and self.devs[dev_id].status == op:
                resp_op = ALREADY
                logging.info(f'{dev_id}:already {"Registered" if op == REG else "Deregistered"}')
                body = struct.pack('<Hh', dev_id, resp_op)
            # Record the registration transaction and read in keys, then pack them into
            # the response. This is a valid SED which can communicate in the deployment.
            #   AES key:     16 bytes
            #   HMAC key:    64 bytes
            #   Random seed: 32 bytes
            elif op == REG:
                self.devs[dev_id] = Device(dev_id, REG, csock)
                resp_op = REG
                with open("/secrets/aes_key", "rb") as aes_file:
                    aes_key = aes_file.read(16)
                with open("/secrets/hmac_key", "rb") as hmac_file:
                    hmac_key = hmac_file.read(64)
                logging.info(f'{dev_id}:Registered')
                seed = secrets.token_bytes(32)
                body = struct.pack('<Hh16s32s64s', dev_id, resp_op, aes_key, seed, hmac_key)
            # Record deregistration for an SED which was verified previously to register
            # and hasn't already been deregistered.
            else:
                self.devs[dev_id] = Device(dev_id, DEREG, csock)
                resp_op = DEREG
                logging.info(f'{dev_id}:Deregistered')
                body = struct.pack('<Hh', dev_id, resp_op)
        # Record an error from reading the SED's {dev_id}_secret. This may happen if an SED
        # attempts to register that should not be included in the deployment, as specified
        # by the {dev_id}_secrets folders generated in dockerfiles/2b_create_sed_secrets.Dockerfile
        else:
            resp_op = ALREADY
            logging.info(f'{dev_id}:bad ID')
            body = struct.pack('<Hh', dev_id, resp_op)

        # Send the response constructed in the previous section to the SED
        resp = struct.pack('<2sHHH', b'SC', dev_id, SSS_ID, len(body)) + body
        logging.debug(f'Sending response {repr(resp)}')
        csock.send(resp)

    # The following methods reflect the provided insecure implementation and keep the SSS
    # active to receive registration and deregistration messages before responding
    def start(self):
        unattributed_socks = set()

        # serve forever
        while True:
            # check for a new client
            if self.sock_ready(self.sock):
                csock, _ = self.sock.accept()
                logging.info(':New connection')
                unattributed_socks.add(csock)
                continue

            # check the pool of unattributed sockets first
            for csock in unattributed_socks:
                try:
                    if self.sock_ready(csock):
                        self.handle_transaction(csock)
                        unattributed_socks.remove(csock)
                        break
                except (ConnectionResetError, BrokenPipeError):
                    logging.info(':Connection closed')
                    unattributed_socks.remove(csock)
                    csock.close()
                    break

            # then check the pool of attributed sockets
            old_ids = []
            for dev in self.devs.values():
                if dev.csock and self.sock_ready(dev.csock):
                    try:
                        self.handle_transaction(dev.csock)
                    except (ConnectionResetError, BrokenPipeError):
                        logging.info(f'{dev.id}:Connection closed')
                        dev.csock.close()
                        old_ids.append(dev.id)
            for dev_id in old_ids:
                del self.devs[dev_id]


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('sockf', help='Path to socket to bind the SSS to')
    return parser.parse_args()


def main():
    args = parse_args()

    # map of SCEWL IDs to statuses
    sss = SSS(args.sockf)
    sss.start()


if __name__ == '__main__':
    main()
Without deployment keys, this SED", "# The following methods reflect the provided insecure implementation and keep the SSS", "csock: socket.SocketType): logging.debug('handling transaction') data = b'' while len(data) < 76: recvd =", "keys. # # Registration: # 1) Given any SED with valid dev_id, establish", "if checked_secret != scewl_secret: logging.info(f'{dev_id}:expected: {checked_secret}, found: {scewl_secret}') resp_op = ALREADY logging.info(f'{dev_id}:key mismatch')", "seed, hmac_key) # Record deregistration for an SED which was verified previously to", "csock in unattributed_socks: try: if self.sock_ready(csock): self.handle_transaction(csock) unattributed_socks.remove(csock) break except (ConnectionResetError, BrokenPipeError): logging.info(':Connection", "socket import select import struct import argparse import logging import os import secrets", "# mirroring scewl enum at scewl.c:4 ALREADY, REG, DEREG = -1, 0, 1", "bytes # HMAC key: 64 bytes # Random seed: 32bytes elif op ==", "deployment. # # Deregistration is handled by sending deregistration message and removing registration", "_ = self.sock.accept() logging.info(f':New connection') unattributed_socks.add(csock) continue # check pool of unattributed sockets", "reflect the provided insecure implementation and keep the SSS active # to received", "This is # a valid SED which can communicate in the deployment. #", "main(): args = parse_args() # map of SCEWL IDs to statuses sss =", "SSS's # registration secret # 3) Distribute AES key (16B), HMAC key (64B)", "socket to bind the SSS to') return parser.parse_args() def main(): args = parse_args()", "logging.info(f'{dev_id}:Registered') seed = secrets.token_bytes(32) body = struct.pack('<Hh16s32s64s', dev_id, resp_op, aes_key, seed, hmac_key) #", "of an example system for MITRE's 2021 Embedded System CTF (eCTF). # This", "other # deployed SEDs while through use of the aformentioned keys. If an", "are constructed below''' # Read in corresponding scewl secret secret_path = f'/secrets/{dev_id}_secret' if", "then pack into response. This is # a valid SED which can communicate", "found: {scewl_secret}') resp_op = ALREADY logging.info(f'{dev_id}:key mismatch') body = struct.pack('<Hh', dev_id, resp_op) #", "csock.close() break # check pool of attributed sockets first old_ids = [] for", "return parser.parse_args() def main(): args = parse_args() # map of SCEWL IDs to", "if os.path.exists(sockf): raise self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.sock.bind(sockf) self.sock.listen(10) self.devs = {} @staticmethod", "Minimal changes have been made to the provided source files to allow for", "= DEREG logging.info(f'{dev_id}:Deregistered') body = struct.pack('<Hh', dev_id, resp_op) # Record some error from", "through use of the aformentioned keys. If an SED doesn't receive these keys", "been made to the provided source files to allow for a these #", "scewl_secret = struct.unpack('<HHHHHH64s', data) '''Message responses are constructed below''' # Read in corresponding", "= argparse.ArgumentParser() parser.add_argument('sockf', help='Path to socket to bind the SSS to') return parser.parse_args()", "a these # features. It should be noted that any key generation is", "= secret_file.read(64) # Scewl_secret mismatch, registration key provided by SED is invalid. 
Log", "error given a discrepancy # # Succesful execution of this procedure means a", "# this script primarily focuses on verifying an SED as valid and distributing", "3) Distribute AES key (16B), HMAC key (64B) and Random seed (32B), given", "key: 64 bytes # Random seed: 32bytes elif op == REG: self.devs[dev_id] =", "resp_op = REG with open(\"/secrets/aes_key\", \"rb\") as aes_file: aes_key = aes_file.read(16) with open(\"/secrets/hmac_key\",", "in the SEDs {dev_id}_secrets folder. This may happen if # an SED is", "MITRE eCTF competition, # and may not meet MITRE standards for quality. Use", "Device(dev_id, DEREG, csock) resp_op = DEREG logging.info(f'{dev_id}:Deregistered') body = struct.pack('<Hh', dev_id, resp_op) #", "rready, wready, _ = select.select([sock], [sock], [], 0) return rready if op ==", "struct.pack('<Hh', dev_id, resp_op) # Requesting repeat transaction in the case that an SED", "= struct.unpack('<HHHHHH64s', data) '''Message responses are constructed below''' # Read in corresponding scewl", "Server # <NAME> # # (c) 2021 The MITRE Corporation # # This", "ConnectionResetError logging.debug(f'Received buffer: {repr(data)}') # Unpack message received from a given SED _,", "struct.pack('<Hh', dev_id, resp_op) # Record some error from reading in the SEDs {dev_id}_secrets", "before responding def start(self): unattributed_socks = set() # serve forever while True: #", "messages will be thrown out by any receiving SED which is part of", "select.select([sock], [sock], [], 0) return rready if op == 'r' else wready def", "REG else \"Deregistered\"}') body = struct.pack('<Hh', dev_id, resp_op) # Record registration transaction and", "if self.sock_ready(csock): self.handle_transaction(csock) unattributed_socks.remove(csock) break except (ConnectionResetError, BrokenPipeError): logging.info(':Connection closed') unattributed_socks.remove(csock) csock.close() break", "scewl secret secret_path = f'/secrets/{dev_id}_secret' if os.path.exists(secret_path): with open(secret_path, \"rb\") as secret_file: #", "key: 16 bytes # HMAC key: 64 bytes # Random seed: 32bytes elif", "connection if not recvd: raise ConnectionResetError logging.debug(f'Received buffer: {repr(data)}') # Unpack message received", "if op == 'r' else wready def handle_transaction(self, csock: socket.SocketType): logging.debug('handling transaction') data", "select import struct import argparse import logging import os import secrets from typing", "its messages will be thrown out by any receiving SED which is part", "NamedTuple('Device', [('id', int), ('status', int), ('csock', socket.socket)]) class SSS: def __init__(self, sockf): #", "logging.info(f'{dev_id}:Deregistered') body = struct.pack('<Hh', dev_id, resp_op) # Record some error from reading in", "provided only for educational purposes for the 2021 MITRE eCTF competition, # and", "SED is # considered invalid for registration. if checked_secret != scewl_secret: logging.info(f'{dev_id}:expected: {checked_secret},", "by the {dev_id}_secrets folders generated in dockerfiles/2b_create_sed_secrets.Dockerfile else: resp_op = ALREADY logging.info(f'{dev_id}:bad ID')", "MITRE standards for quality. Use this code at your own risk! # #", "def sock_ready(sock, op='r'): rready, wready, _ = select.select([sock], [sock], [], 0) return rready", "into the response. Without deployment keys, this SED is # considered invalid for", "from reading in the SEDs {dev_id}_secrets folder. 
This may happen if # an", "2021 The MITRE Corporation # # This source file is part of an", "= REG with open(\"/secrets/aes_key\", \"rb\") as aes_file: aes_key = aes_file.read(16) with open(\"/secrets/hmac_key\", \"rb\")", "socket.SOCK_STREAM) self.sock.bind(sockf) self.sock.listen(10) self.devs = {} @staticmethod def sock_ready(sock, op='r'): rready, wready, _", "unattributed sockets first for csock in unattributed_socks: try: if self.sock_ready(csock): self.handle_transaction(csock) unattributed_socks.remove(csock) break", "@staticmethod def sock_ready(sock, op='r'): rready, wready, _ = select.select([sock], [sock], [], 0) return", "dockerfiles/3_remove_sed.Dockerfile) import socket import select import struct import argparse import logging import os", "= struct.pack('<Hh', dev_id, resp_op) # Record registration transaction and read in keys, then", "\"Deregistered\"}') body = struct.pack('<Hh', dev_id, resp_op) # Record registration transaction and read in", "keys, then pack into response. This is # a valid SED which can", "rready if op == 'r' else wready def handle_transaction(self, csock: socket.SocketType): logging.debug('handling transaction')", "import os import secrets from typing import NamedTuple SSS_IP = 'localhost' SSS_ID =", "else: resp_op = ALREADY logging.info(f'{dev_id}:bad ID') body = struct.pack('<Hh', dev_id, resp_op) # Send", "(eCTF). # This code is being provided only for educational purposes for the", "= parse_args() # map of SCEWL IDs to statuses sss = SSS(args.sockf) sss.start()", "for a these # features. It should be noted that any key generation", "the previous section resp = struct.pack('<2sHHH', b'SC', dev_id, SSS_ID, len(body)) + body logging.debug(f'Sending", "open(\"/secrets/aes_key\", \"rb\") as aes_file: aes_key = aes_file.read(16) with open(\"/secrets/hmac_key\", \"rb\") as hmac_file: hmac_key", "risk! # # This is the Secure SCEWL Server that handles SED registration", "dev_id, resp_op) # Record some error from reading in the SEDs {dev_id}_secrets folder.", "# AES key: 16 bytes # HMAC key: 64 bytes # Random seed:", "{dev_id}_secrets folder. This may happen if # an SED is attempted to register,", "first for csock in unattributed_socks: try: if self.sock_ready(csock): self.handle_transaction(csock) unattributed_socks.remove(csock) break except (ConnectionResetError,", "for dev in self.devs.values(): if dev.csock and self.sock_ready(dev.csock): try: self.handle_transaction(dev.csock) except (ConnectionResetError, BrokenPipeError):", "logging.info(f':New connection') unattributed_socks.add(csock) continue # check pool of unattributed sockets first for csock", "while len(data) < 76: recvd = csock.recv(76 - len(data)) data += recvd #", "except (ConnectionResetError, BrokenPipeError): logging.info(':Connection closed') unattributed_socks.remove(csock) csock.close() break # check pool of attributed", "HMAC key (64B) and Random seed (32B), given a match # 4) Send", "can communicate in the deployment. # AES key: 16 bytes # HMAC key:", "state already reflects the # received op. Log this event. elif dev_id in", "received op. Log this event. 
elif dev_id in self.devs and self.devs[dev_id].status == op:", "aes_file.read(16) with open(\"/secrets/hmac_key\", \"rb\") as hmac_file: hmac_key = hmac_file.read(64) logging.info(f'{dev_id}:Registered') seed = secrets.token_bytes(32)", "included on the deployment as specified # by the {dev_id}_secrets folders generated in", "bytes # Random seed: 32bytes elif op == REG: self.devs[dev_id] = Device(dev_id, REG,", "given SED is valid and may communicate with other # deployed SEDs while", "{} @staticmethod def sock_ready(sock, op='r'): rready, wready, _ = select.select([sock], [sock], [], 0)", "with open(\"/secrets/aes_key\", \"rb\") as aes_file: aes_key = aes_file.read(16) with open(\"/secrets/hmac_key\", \"rb\") as hmac_file:", "SED _, _, _, _, dev_id, op, scewl_secret = struct.unpack('<HHHHHH64s', data) '''Message responses", "self.devs = {} @staticmethod def sock_ready(sock, op='r'): rready, wready, _ = select.select([sock], [sock],", "does not already exist try: os.unlink(sockf) except OSError: if os.path.exists(sockf): raise self.sock =", "received from a given SED _, _, _, _, dev_id, op, scewl_secret =", "Embedded System CTF (eCTF). # This code is being provided only for educational", "{repr(data)}') csock.send(resp) # The following methods reflect the provided insecure implementation and keep", "System CTF (eCTF). # This code is being provided only for educational purposes", "break # check pool of attributed sockets first old_ids = [] for dev", "# by the {dev_id}_secrets folders generated in dockerfiles/2b_create_sed_secrets.Dockerfile else: resp_op = ALREADY logging.info(f'{dev_id}:bad", "unattributed_socks: try: if self.sock_ready(csock): self.handle_transaction(csock) unattributed_socks.remove(csock) break except (ConnectionResetError, BrokenPipeError): logging.info(':Connection closed') unattributed_socks.remove(csock)", "# (c) 2021 The MITRE Corporation # # This source file is part", "as specified # by the {dev_id}_secrets folders generated in dockerfiles/2b_create_sed_secrets.Dockerfile else: resp_op =", "# its messages will be thrown out by any receiving SED which is", "for dev_id in old_ids: del self.devs[dev_id] def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('sockf', help='Path", "and self.sock_ready(dev.csock): try: self.handle_transaction(dev.csock) except (ConnectionResetError, BrokenPipeError): logging.info(f'{dev.id}:Connection closed') dev.csock.close() old_ids.append(dev.id) for dev_id", "socket does not already exist try: os.unlink(sockf) except OSError: if os.path.exists(sockf): raise self.sock", "= struct.pack('<Hh', dev_id, resp_op) # Send response to SED constructed in the previous", "logging.debug(f'Sending response {repr(data)}') csock.send(resp) # The following methods reflect the provided insecure implementation", "struct import argparse import logging import os import secrets from typing import NamedTuple", "insecure implementation and keep the SSS active # to received registration and deregistration", "system for MITRE's 2021 Embedded System CTF (eCTF). # This code is being", "SED registration and key distribution for any given # deployment. Minimal changes have", "for the 2021 MITRE eCTF competition, # and may not meet MITRE standards", "deregistration message and removing registration secret from # the SED (see dockerfiles/3_remove_sed.Dockerfile) import", "bind the SSS to') return parser.parse_args() def main(): args = parse_args() # map", "deployment keys, this SED is # considered invalid for registration. 
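    # --- Added annotation (not in the original source) -------------------------
    # Wire format, as implied by the struct strings in handle_transaction below.
    # The meaning of the first four request fields is inferred from the response
    # header and is an assumption:
    #
    #   request  (76 B):  <HHHHHH64s> = [4 x uint16 header: magic b'SC',
    #                     addressing, body length] [dev_id] [op] [64 B scewl_secret]
    #   response (8 B header + body): <2sHHH> = b'SC', dev_id, SSS_ID, len(body)
    #     success body (116 B): <Hh16s32s64s> = dev_id, resp_op, AES key (16 B),
    #                           random seed (32 B), HMAC key (64 B)
    #     error body   (4 B):   <Hh> = dev_id, resp_op (= ALREADY)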
    def handle_transaction(self, csock: socket.SocketType):
        logging.debug('handling transaction')
        data = b''
        while len(data) < 76:
            recvd = csock.recv(76 - len(data))
            data += recvd

            # check for a closed connection
            if not recvd:
                raise ConnectionResetError
        logging.debug(f'Received buffer: {repr(data)}')

        # Unpack the message received from a given SED
        _, _, _, _, dev_id, op, scewl_secret = struct.unpack('<HHHHHH64s', data)

        '''Message responses are constructed below'''
        # Read in the corresponding scewl secret
        secret_path = f'/secrets/{dev_id}_secret'
        if os.path.exists(secret_path):
            with open(secret_path, "rb") as secret_file:
                # Read in the registration secret for verification
                checked_secret = secret_file.read(64)

            # scewl_secret mismatch: the registration key provided by the SED is invalid. Log this
            # event and pack the ALREADY resp_op into the response. Without the deployment keys,
            # this SED is considered invalid for registration.
            if checked_secret != scewl_secret:
                logging.info(f'{dev_id}:expected: {checked_secret}, found: {scewl_secret}')
                resp_op = ALREADY
                logging.info(f'{dev_id}:key mismatch')
                body = struct.pack('<Hh', dev_id, resp_op)
            # Request a repeat transaction in the case that the SED's state already reflects the
            # received op. Log this event.
            elif dev_id in self.devs and self.devs[dev_id].status == op:
                resp_op = ALREADY
                logging.info(f'{dev_id}:already {"Registered" if op == REG else "Deregistered"}')
                body = struct.pack('<Hh', dev_id, resp_op)
            # Record the registration transaction and read in the keys, then pack them into the
            # response. This is a valid SED which can communicate in the deployment.
            # AES key: 16 bytes
            # HMAC key: 64 bytes
            # Random seed: 32 bytes
            elif op == REG:
                self.devs[dev_id] = Device(dev_id, REG, csock)
                resp_op = REG
                with open("/secrets/aes_key", "rb") as aes_file:
                    aes_key = aes_file.read(16)
                with open("/secrets/hmac_key", "rb") as hmac_file:
                    hmac_key = hmac_file.read(64)
                logging.info(f'{dev_id}:Registered')
                seed = secrets.token_bytes(32)
                body = struct.pack('<Hh16s32s64s', dev_id, resp_op, aes_key, seed, hmac_key)
            # Record deregistration for an SED which was verified previously to register and
            # hasn't already been deregistered.
            else:
                self.devs[dev_id] = Device(dev_id, DEREG, csock)
                resp_op = DEREG
                logging.info(f'{dev_id}:Deregistered')
                body = struct.pack('<Hh', dev_id, resp_op)
        # Record an error from reading the SED's {dev_id}_secrets folder. This may happen if an SED
        # that should not be included in the deployment attempts to register, as specified by the
        # {dev_id}_secrets folders generated in dockerfiles/2b_create_sed_secrets.Dockerfile
        else:
            resp_op = ALREADY
            logging.info(f'{dev_id}:bad ID')
            body = struct.pack('<Hh', dev_id, resp_op)

        # Send the response constructed in the previous section back to the SED
        resp = struct.pack('<2sHHH', b'SC', dev_id, SSS_ID, len(body)) + body
        logging.debug(f'Sending response {repr(resp)}')
        csock.send(resp)

    # The following methods reflect the provided insecure implementation and keep the SSS active
    # to receive registration and deregistration messages before responding
    def start(self):
        unattributed_socks = set()

        # serve forever
        while True:
            # check for a new client
            if self.sock_ready(self.sock):
                csock, _ = self.sock.accept()
                logging.info(':New connection')
                unattributed_socks.add(csock)
                continue

            # check the pool of unattributed sockets first
            for csock in unattributed_socks:
                try:
                    if self.sock_ready(csock):
                        self.handle_transaction(csock)
                        unattributed_socks.remove(csock)
                        break
                except (ConnectionResetError, BrokenPipeError):
                    logging.info(':Connection closed')
                    unattributed_socks.remove(csock)
                    csock.close()
                    break

            # then check the pool of attributed sockets
            old_ids = []
            for dev in self.devs.values():
                if dev.csock and self.sock_ready(dev.csock):
                    try:
                        self.handle_transaction(dev.csock)
                    except (ConnectionResetError, BrokenPipeError):
                        logging.info(f'{dev.id}:Connection closed')
                        dev.csock.close()
                        old_ids.append(dev.id)
            for dev_id in old_ids:
                del self.devs[dev_id]


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('sockf', help='Path to socket to bind the SSS to')
    return parser.parse_args()


def main():
    args = parse_args()

    # map of SCEWL IDs to statuses
    sss = SSS(args.sockf)
    sss.start()


if __name__ == '__main__':
    main()
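A minimal SED-side sketch of the registration exchange, inferred from the struct
format strings used by the server above. The request header layout (magic,
addressing, body length) mirrors the server's response header and is an
assumption, as are SOCK_PATH and the helper names; none of this is part of the
original SSS source.

import socket
import struct

SSS_ID = 1
REG = 0
SOCK_PATH = '/socks/sss.sock'  # hypothetical; use the path the SSS was bound to


def _recv_exact(sock, n):
    """Read exactly n bytes, raising if the peer closes early."""
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionResetError
        buf += chunk
    return buf


def register(dev_id, scewl_secret):
    """Send the 76-byte registration request; return (resp_op, keys or None)."""
    assert len(scewl_secret) == 64
    body = struct.pack('<HH64s', dev_id, REG, scewl_secret)               # 68 B
    msg = struct.pack('<2sHHH', b'SC', SSS_ID, dev_id, len(body)) + body  # 76 B
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
        sock.connect(SOCK_PATH)
        sock.sendall(msg)
        _, _, _, body_len = struct.unpack('<2sHHH', _recv_exact(sock, 8))
        resp = _recv_exact(sock, body_len)
    if body_len == struct.calcsize('<Hh16s32s64s'):   # success: keys follow
        _, resp_op, aes_key, seed, hmac_key = struct.unpack('<Hh16s32s64s', resp)
        return resp_op, (aes_key, seed, hmac_key)
    _, resp_op = struct.unpack('<Hh', resp)           # ALREADY / error path
    return resp_op, None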
[ "np.sqrt(np.pi) * r[i] g = (1 / 1000) * np.sqrt(np.pi) * r[i] w,", "g): freq_low = 0 freq_high = 5 num_freq = 300 w = np.arange(freq_low,", "datasets:' % sets) ftrTrain, ftrTest, lblTrain, lblTest = train_test_split(features, labels, test_size=0.2, random_state=1234) train_data", "torch.utils.data import Dataset from sklearn.model_selection import train_test_split # Define the class for the", "are %i datasets:' % sets) ftrTrain, ftrTest, lblTrain, lblTest = train_test_split(features, labels, test_size=0.2,", "Dataset from sklearn.model_selection import train_test_split # Define the class for the Meta-material dataset", "= np.divide(np.multiply(np.power(wp, 2), np.add(np.power(w0, 2), -np.power(w, 2))), # np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2),", "wp, g): freq_low = 0 freq_high = 5 num_freq = 300 w =", "freq_high = 5 num_freq = 300 w = np.arange(freq_low, freq_high, (freq_high - freq_low)", "arrays each iteration and then selects 0th element to generate random n x", "+= e2 return geom, spectra # Calculate Lorentzian function to get spectra def", "2)))) e2 = np.divide(np.multiply(np.power(wp, 2), np.multiply(w, g)), np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2), np.multiply(np.power(w,", "bounds for metamaterial radius and height r_min = 20 r_max = 200 h_min", "MM_Geom(n) spectra = np.zeros(300) geom = np.concatenate((r, h), axis=0) for i in range(n):", "* np.sqrt(np.pi) * r[i] g = (1 / 1000) * np.sqrt(np.pi) * r[i]", "element to generate random n x n parameter set r, h = np.zeros(n,", "import Dataset from sklearn.model_selection import train_test_split # Define the class for the Meta-material", "1) # Shuffles r,h arrays each iteration and then selects 0th element to", "freq_low = 0 freq_high = 5 num_freq = 300 w = np.arange(freq_low, freq_high,", "print('Number of Test samples is {}'.format(len(ftrTest))) return train_loader, test_loader def gen_data(name): train_loader, test_loader", "train_loader, test_loader = Prepare_Data(1, 10000, 1000) with open(name, 'a') as datafile: for j,", "labels, which is always the Spectra !! 
:param bool_train: \"\"\" self.ftr = ftr", "/ 1000) * np.sqrt(np.pi) * r[i] w, e2 = Lorentzian(w0, wp, g) spectra", "features = [] labels = [] for i in range(sets): geom, spectra =", "= ftr self.lbl = lbl self.bool_train = bool_train self.len = len(ftr) def __len__(self):", "= r_space[0] h[i] = h_space[0] return r, h # Make geometry and spectra", "= np.zeros(n, dtype=float), np.zeros(n, dtype=float) for i in range(n): np.random.shuffle(r_space) np.random.shuffle(h_space) r[i] =", "ftrTrain, ftrTest, lblTrain, lblTest = train_test_split(features, labels, test_size=0.2, random_state=1234) train_data = MetaMaterialDataSet(ftrTrain, lblTrain,", "as plt import pandas as pd import torch from torch.utils.data import Dataset from", "i in range(sets): geom, spectra = Make_MM_Model(osc) features.append(geom) labels.append(spectra) features = np.array(features, dtype='float32')", "pandas as pd import torch from torch.utils.data import Dataset from sklearn.model_selection import train_test_split", "len(ftr) def __len__(self): return self.len def __getitem__(self, ind): return self.ftr[ind, :], self.lbl[ind, :]", "2)), 2), # np.multiply(np.power(w, 2), np.power(g, 2)))) e2 = np.divide(np.multiply(np.power(wp, 2), np.multiply(w, g)),", "Labels is %i' % (ftrsize, lblsize)) print('There are %i datasets:' % sets) ftrTrain,", "2), np.multiply(w, g)), np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2), np.multiply(np.power(w, 2), np.power(g, 2)))) return", "= len(ftr) def __len__(self): return self.len def __getitem__(self, ind): return self.ftr[ind, :], self.lbl[ind,", "h # Make geometry and spectra def Make_MM_Model(n): r, h = MM_Geom(n) spectra", "space + 1) h_space = np.linspace(h_min, h_max, space + 1) # Shuffles r,h", "def __getitem__(self, ind): return self.ftr[ind, :], self.lbl[ind, :] ## Copied from Omar's code", "np.power(g, 2)))) e2 = np.divide(np.multiply(np.power(wp, 2), np.multiply(w, g)), np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2),", "simulated spectra for training and testing def Prepare_Data(osc, sets, batch_size): features = []", "np import matplotlib.pyplot as plt import pandas as pd import torch from torch.utils.data", "# np.multiply(np.power(w, 2), np.power(g, 2)))) e2 = np.divide(np.multiply(np.power(wp, 2), np.multiply(w, g)), np.add(np.power(np.add(np.power(w0, 2),", "j, (geometry, spectra) in enumerate(train_loader): concate = np.concatenate([geometry, spectra], axis=1) #print(np.shape(concate)) np.savetxt(datafile, concate,", "# Calculate Lorentzian function to get spectra def Lorentzian(w0, wp, g): freq_low =", "np.multiply(np.power(w, 2), np.power(g, 2)))) return w, e2 # Generates randomized dataset of simulated", "labels, test_size=0.2, random_state=1234) train_data = MetaMaterialDataSet(ftrTrain, lblTrain, bool_train=True) test_data = MetaMaterialDataSet(ftrTest, lblTest, bool_train=False)", "bool_train self.len = len(ftr) def __len__(self): return self.len def __getitem__(self, ind): return self.ftr[ind,", "/ h[i] wp = (1 / 100) * np.sqrt(np.pi) * r[i] g =", "geom, spectra # Calculate Lorentzian function to get spectra def Lorentzian(w0, wp, g):", "2), np.add(np.power(w0, 2), -np.power(w, 2))), # np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2), # np.multiply(np.power(w,", "of Training samples is {}'.format(len(ftrTrain))) print('Number of Test samples is {}'.format(len(ftrTest))) return train_loader,", "np.linspace(h_min, h_max, space + 1) # Shuffles r,h arrays each iteration and then", "np.add(np.power(np.add(np.power(w0, 2), 
-np.power(w, 2)), 2), np.multiply(np.power(w, 2), np.power(g, 2)))) return w, e2 #", "print('There are %i datasets:' % sets) ftrTrain, ftrTest, lblTrain, lblTest = train_test_split(features, labels,", "wp = (1 / 100) * np.sqrt(np.pi) * r[i] g = (1 /", "self.ftr[ind, :], self.lbl[ind, :] ## Copied from Omar's code # Make geometry samples", "= Lorentzian(w0, wp, g) spectra += e2 return geom, spectra # Calculate Lorentzian", "axis=0) for i in range(n): w0 = 100 / h[i] wp = (1", "test_data = MetaMaterialDataSet(ftrTest, lblTest, bool_train=False) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size)", "geom = np.concatenate((r, h), axis=0) for i in range(n): w0 = 100 /", "n x n parameter set r, h = np.zeros(n, dtype=float), np.zeros(n, dtype=float) for", "sets lblsize = labels.size / sets print('Size of Features is %i, Size of", "labels = [] for i in range(sets): geom, spectra = Make_MM_Model(osc) features.append(geom) labels.append(spectra)", "bool_train): \"\"\" Instantiate the Dataset Object :param ftr: the features which is always", "and spectra def Make_MM_Model(n): r, h = MM_Geom(n) spectra = np.zeros(300) geom =", "2), # np.multiply(np.power(w, 2), np.power(g, 2)))) e2 = np.divide(np.multiply(np.power(wp, 2), np.multiply(w, g)), np.add(np.power(np.add(np.power(w0,", "w0 = 100 / h[i] wp = (1 / 100) * np.sqrt(np.pi) *", "radius and height r_min = 20 r_max = 200 h_min = 20 h_max", "def __len__(self): return self.len def __getitem__(self, ind): return self.ftr[ind, :], self.lbl[ind, :] ##", "h_max = 100 # Defines hypergeometric space of parameters to choose from space", "+ 1) h_space = np.linspace(h_min, h_max, space + 1) # Shuffles r,h arrays", "to generate random n x n parameter set r, h = np.zeros(n, dtype=float),", "2)))) return w, e2 # Generates randomized dataset of simulated spectra for training", "Copied from Omar's code # Make geometry samples def MM_Geom(n): # Parameter bounds", "!! :param bool_train: \"\"\" self.ftr = ftr self.lbl = lbl self.bool_train = bool_train", "h = np.zeros(n, dtype=float), np.zeros(n, dtype=float) for i in range(n): np.random.shuffle(r_space) np.random.shuffle(h_space) r[i]", "always the Geometry !! 
:param lbl: the labels, which is always the Spectra", "Features is %i, Size of Labels is %i' % (ftrsize, lblsize)) print('There are", "Define the class for the Meta-material dataset class MetaMaterialDataSet(Dataset): \"\"\" The Meta Material", "from torch.utils.data import Dataset from sklearn.model_selection import train_test_split # Define the class for", "range(n): np.random.shuffle(r_space) np.random.shuffle(h_space) r[i] = r_space[0] h[i] = h_space[0] return r, h #", "np.power(g, 2)))) return w, e2 # Generates randomized dataset of simulated spectra for", "np.savetxt(datafile, concate, delimiter=',') if __name__ == \"__main__\": train_loader, test_loader = Prepare_Data(1, 10000, 1000)", "g = (1 / 1000) * np.sqrt(np.pi) * r[i] w, e2 = Lorentzian(w0,", "/ sets lblsize = labels.size / sets print('Size of Features is %i, Size", "Lorentzian(w0, wp, g): freq_low = 0 freq_high = 5 num_freq = 300 w", "2), np.power(g, 2)))) return w, e2 # Generates randomized dataset of simulated spectra", "= Prepare_Data(1, 10000, 1000) with open(name, 'a') as datafile: for j, (geometry, spectra)", "# print(np.shape(concate)) np.savetxt(datafile, concate, delimiter=',') if __name__ == \"__main__\": train_loader, test_loader = Prepare_Data(1,", "(1 / 100) * np.sqrt(np.pi) * r[i] g = (1 / 1000) *", "= torch.utils.data.DataLoader(train_data, batch_size=batch_size) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size) print('Number of Training samples is {}'.format(len(ftrTrain)))", "which is always the Spectra !! :param bool_train: \"\"\" self.ftr = ftr self.lbl", "Instantiate the Dataset Object :param ftr: the features which is always the Geometry", "/ sets print('Size of Features is %i, Size of Labels is %i' %", "iteration and then selects 0th element to generate random n x n parameter", "as datafile: for j, (geometry, spectra) in enumerate(train_loader): concate = np.concatenate([geometry, spectra], axis=1)", "dataset class MetaMaterialDataSet(Dataset): \"\"\" The Meta Material Dataset Class \"\"\" def __init__(self, ftr,", "h_space[0] return r, h # Make geometry and spectra def Make_MM_Model(n): r, h", "is {}'.format(len(ftrTrain))) print('Number of Test samples is {}'.format(len(ftrTest))) return train_loader, test_loader def gen_data(name):", "spectra = Make_MM_Model(osc) features.append(geom) labels.append(spectra) features = np.array(features, dtype='float32') labels = np.array(labels, dtype='float32')", "range(sets): geom, spectra = Make_MM_Model(osc) features.append(geom) labels.append(spectra) features = np.array(features, dtype='float32') labels =", "dtype='float32') labels = np.array(labels, dtype='float32') ftrsize = features.size / sets lblsize = labels.size", "gen_data(name): train_loader, test_loader = Prepare_Data(1, 10000, 1000) with open(name, 'a') as datafile: for", "datafile: for j, (geometry, spectra) in enumerate(train_loader): concate = np.concatenate([geometry, spectra], axis=1) #print(np.shape(concate))", "'a') as datafile: for j, (geometry, spectra) in enumerate(train_loader): concate = np.concatenate([geometry, spectra],", "and then selects 0th element to generate random n x n parameter set", "Geometry !! :param lbl: the labels, which is always the Spectra !! 
:param", "print('Size of Features is %i, Size of Labels is %i' % (ftrsize, lblsize))", "samples def MM_Geom(n): # Parameter bounds for metamaterial radius and height r_min =", "Make geometry and spectra def Make_MM_Model(n): r, h = MM_Geom(n) spectra = np.zeros(300)", "sets print('Size of Features is %i, Size of Labels is %i' % (ftrsize,", "def Make_MM_Model(n): r, h = MM_Geom(n) spectra = np.zeros(300) geom = np.concatenate((r, h),", "in range(n): np.random.shuffle(r_space) np.random.shuffle(h_space) r[i] = r_space[0] h[i] = h_space[0] return r, h", "lblTrain, lblTest = train_test_split(features, labels, test_size=0.2, random_state=1234) train_data = MetaMaterialDataSet(ftrTrain, lblTrain, bool_train=True) test_data", "2)), 2), np.multiply(np.power(w, 2), np.power(g, 2)))) return w, e2 # Generates randomized dataset", "Make_MM_Model(n): r, h = MM_Geom(n) spectra = np.zeros(300) geom = np.concatenate((r, h), axis=0)", "of Test samples is {}'.format(len(ftrTest))) return train_loader, test_loader def gen_data(name): train_loader, test_loader =", "= MetaMaterialDataSet(ftrTrain, lblTrain, bool_train=True) test_data = MetaMaterialDataSet(ftrTest, lblTest, bool_train=False) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size)", "r, h = MM_Geom(n) spectra = np.zeros(300) geom = np.concatenate((r, h), axis=0) for", "20 h_max = 100 # Defines hypergeometric space of parameters to choose from", "of parameters to choose from space = 10 r_space = np.linspace(r_min, r_max, space", "training and testing def Prepare_Data(osc, sets, batch_size): features = [] labels = []", "generate random n x n parameter set r, h = np.zeros(n, dtype=float), np.zeros(n,", "space of parameters to choose from space = 10 r_space = np.linspace(r_min, r_max,", "e2 = Lorentzian(w0, wp, g) spectra += e2 return geom, spectra # Calculate", "train_data = MetaMaterialDataSet(ftrTrain, lblTrain, bool_train=True) test_data = MetaMaterialDataSet(ftrTest, lblTest, bool_train=False) train_loader = torch.utils.data.DataLoader(train_data,", "def Prepare_Data(osc, sets, batch_size): features = [] labels = [] for i in", "datafile: for j, (geometry, spectra) in enumerate(train_loader): concate = np.concatenate([geometry, spectra], axis=1) #", "ftrsize = features.size / sets lblsize = labels.size / sets print('Size of Features", "Shuffles r,h arrays each iteration and then selects 0th element to generate random", "* np.sqrt(np.pi) * r[i] w, e2 = Lorentzian(w0, wp, g) spectra += e2", "\"__main__\": train_loader, test_loader = Prepare_Data(1, 10000, 1000) with open('toy_data/mm1d_6.csv', 'a') as datafile: for", "to choose from space = 10 r_space = np.linspace(r_min, r_max, space + 1)", "= (1 / 100) * np.sqrt(np.pi) * r[i] g = (1 / 1000)", "5 num_freq = 300 w = np.arange(freq_low, freq_high, (freq_high - freq_low) / num_freq)", "= bool_train self.len = len(ftr) def __len__(self): return self.len def __getitem__(self, ind): return", "[] for i in range(sets): geom, spectra = Make_MM_Model(osc) features.append(geom) labels.append(spectra) features =", "with open('toy_data/mm1d_6.csv', 'a') as datafile: for j, (geometry, spectra) in enumerate(train_loader): concate =", "spectra def Lorentzian(w0, wp, g): freq_low = 0 freq_high = 5 num_freq =", "2), -np.power(w, 2))), # np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2), # np.multiply(np.power(w, 2), np.power(g,", "from Omar's code # Make geometry samples def MM_Geom(n): # Parameter bounds for", "2), np.multiply(np.power(w, 2), np.power(g, 2)))) return w, e2 # 
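# --- Added annotation (not in the original source) -----------------------------
# The e2 computed by Lorentzian() below is the imaginary part of a single
# Lorentz-oscillator permittivity; the commented-out e1 is the real part:
#
#   e1(w) = wp^2 * (w0^2 - w^2) / ((w0^2 - w^2)^2 + w^2 * g^2)
#   e2(w) = wp^2 * w * g        / ((w0^2 - w^2)^2 + w^2 * g^2)
#
# with resonance w0, plasma frequency wp, and damping g, so each (r, h) pair in
# Make_MM_Model() contributes one resonance peaked near w0 = 100 / h.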
# Calculate the Lorentzian function to get spectra
def Lorentzian(w0, wp, g):
    freq_low = 0
    freq_high = 5
    num_freq = 300
    w = np.arange(freq_low, freq_high, (freq_high - freq_low) / num_freq)
    # e1 = np.divide(np.multiply(np.power(wp, 2), np.add(np.power(w0, 2), -np.power(w, 2))),
    #                np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2),
    #                       np.multiply(np.power(w, 2), np.power(g, 2))))
    e2 = np.divide(np.multiply(np.power(wp, 2), np.multiply(w, g)),
                   np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2),
                          np.multiply(np.power(w, 2), np.power(g, 2))))
    return w, e2


# Generates a randomized dataset of simulated spectra for training and testing
def Prepare_Data(osc, sets, batch_size):
    features = []
    labels = []
    for i in range(sets):
        geom, spectra = Make_MM_Model(osc)
        features.append(geom)
        labels.append(spectra)

    features = np.array(features, dtype='float32')
    labels = np.array(labels, dtype='float32')

    ftrsize = features.size / sets
    lblsize = labels.size / sets
    print('Size of Features is %i, Size of Labels is %i' % (ftrsize, lblsize))
    print('There are %i datasets:' % sets)

    ftrTrain, ftrTest, lblTrain, lblTest = train_test_split(features, labels, test_size=0.2, random_state=1234)
    train_data = MetaMaterialDataSet(ftrTrain, lblTrain, bool_train=True)
    test_data = MetaMaterialDataSet(ftrTest, lblTest, bool_train=False)
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size)
    test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size)
    print('Number of Training samples is {}'.format(len(ftrTrain)))
    print('Number of Test samples is {}'.format(len(ftrTest)))
    return train_loader, test_loader


def gen_data(name):
    train_loader, test_loader = Prepare_Data(1, 10000, 1000)
    with open(name, 'a') as datafile:
        for j, (geometry, spectra) in enumerate(train_loader):
            concate = np.concatenate([geometry, spectra], axis=1)
            # print(np.shape(concate))
            np.savetxt(datafile, concate, delimiter=',')


if __name__ == "__main__":
    gen_data('toy_data/mm1d_6.csv')
Dataset", "2), np.power(g, 2)))) e2 = np.divide(np.multiply(np.power(wp, 2), np.multiply(w, g)), np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)),", "h[i] wp = (1 / 100) * np.sqrt(np.pi) * r[i] g = (1", "h), axis=0) for i in range(n): w0 = 100 / h[i] wp =", "spectra) in enumerate(train_loader): concate = np.concatenate([geometry, spectra], axis=1) # print(np.shape(concate)) np.savetxt(datafile, concate, delimiter=',')", "1000) * np.sqrt(np.pi) * r[i] w, e2 = Lorentzian(w0, wp, g) spectra +=", ":param lbl: the labels, which is always the Spectra !! :param bool_train: \"\"\"", "-np.power(w, 2)), 2), # np.multiply(np.power(w, 2), np.power(g, 2)))) e2 = np.divide(np.multiply(np.power(wp, 2), np.multiply(w,", "Calculate Lorentzian function to get spectra def Lorentzian(w0, wp, g): freq_low = 0", "e2 = np.divide(np.multiply(np.power(wp, 2), np.multiply(w, g)), np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2), np.multiply(np.power(w, 2),", "np.sqrt(np.pi) * r[i] w, e2 = Lorentzian(w0, wp, g) spectra += e2 return", "def gen_data(name): train_loader, test_loader = Prepare_Data(1, 10000, 1000) with open(name, 'a') as datafile:", "h[i] = h_space[0] return r, h # Make geometry and spectra def Make_MM_Model(n):", "samples is {}'.format(len(ftrTrain))) print('Number of Test samples is {}'.format(len(ftrTest))) return train_loader, test_loader def", "concate, delimiter=',') if __name__ == \"__main__\": train_loader, test_loader = Prepare_Data(1, 10000, 1000) with", "spectra = np.zeros(300) geom = np.concatenate((r, h), axis=0) for i in range(n): w0", "Material Dataset Class \"\"\" def __init__(self, ftr, lbl, bool_train): \"\"\" Instantiate the Dataset", "= np.array(features, dtype='float32') labels = np.array(labels, dtype='float32') ftrsize = features.size / sets lblsize", "!! :param lbl: the labels, which is always the Spectra !! 
:param bool_train:", "2), -np.power(w, 2)), 2), # np.multiply(np.power(w, 2), np.power(g, 2)))) e2 = np.divide(np.multiply(np.power(wp, 2),", "for j, (geometry, spectra) in enumerate(train_loader): concate = np.concatenate([geometry, spectra], axis=1) # print(np.shape(concate))", "the Meta-material dataset class MetaMaterialDataSet(Dataset): \"\"\" The Meta Material Dataset Class \"\"\" def", "as np import matplotlib.pyplot as plt import pandas as pd import torch from", "of Features is %i, Size of Labels is %i' % (ftrsize, lblsize)) print('There", "np.array(labels, dtype='float32') ftrsize = features.size / sets lblsize = labels.size / sets print('Size", "i in range(n): np.random.shuffle(r_space) np.random.shuffle(h_space) r[i] = r_space[0] h[i] = h_space[0] return r,", "Meta Material Dataset Class \"\"\" def __init__(self, ftr, lbl, bool_train): \"\"\" Instantiate the", "random_state=1234) train_data = MetaMaterialDataSet(ftrTrain, lblTrain, bool_train=True) test_data = MetaMaterialDataSet(ftrTest, lblTest, bool_train=False) train_loader =", "dtype='float32') ftrsize = features.size / sets lblsize = labels.size / sets print('Size of", "20 r_max = 200 h_min = 20 h_max = 100 # Defines hypergeometric", "/ 100) * np.sqrt(np.pi) * r[i] g = (1 / 1000) * np.sqrt(np.pi)", "np.divide(np.multiply(np.power(wp, 2), np.add(np.power(w0, 2), -np.power(w, 2))), # np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2), #", "10 r_space = np.linspace(r_min, r_max, space + 1) h_space = np.linspace(h_min, h_max, space", "parameter set r, h = np.zeros(n, dtype=float), np.zeros(n, dtype=float) for i in range(n):", "= np.arange(freq_low, freq_high, (freq_high - freq_low) / num_freq) # e1 = np.divide(np.multiply(np.power(wp, 2),", "labels = np.array(labels, dtype='float32') ftrsize = features.size / sets lblsize = labels.size /", "r_min = 20 r_max = 200 h_min = 20 h_max = 100 #", "h = MM_Geom(n) spectra = np.zeros(300) geom = np.concatenate((r, h), axis=0) for i", "# Defines hypergeometric space of parameters to choose from space = 10 r_space", "Dataset Class \"\"\" def __init__(self, ftr, lbl, bool_train): \"\"\" Instantiate the Dataset Object", "# Make geometry samples def MM_Geom(n): # Parameter bounds for metamaterial radius and", "from space = 10 r_space = np.linspace(r_min, r_max, space + 1) h_space =", "%i' % (ftrsize, lblsize)) print('There are %i datasets:' % sets) ftrTrain, ftrTest, lblTrain,", "geom, spectra = Make_MM_Model(osc) features.append(geom) labels.append(spectra) features = np.array(features, dtype='float32') labels = np.array(labels,", "= 20 r_max = 200 h_min = 20 h_max = 100 # Defines", ":] ## Copied from Omar's code # Make geometry samples def MM_Geom(n): #", "300 w = np.arange(freq_low, freq_high, (freq_high - freq_low) / num_freq) # e1 =", "dtype=float), np.zeros(n, dtype=float) for i in range(n): np.random.shuffle(r_space) np.random.shuffle(h_space) r[i] = r_space[0] h[i]", "= h_space[0] return r, h # Make geometry and spectra def Make_MM_Model(n): r,", "lbl self.bool_train = bool_train self.len = len(ftr) def __len__(self): return self.len def __getitem__(self,", "from sklearn.model_selection import train_test_split # Define the class for the Meta-material dataset class", "(geometry, spectra) in enumerate(train_loader): concate = np.concatenate([geometry, spectra], axis=1) # print(np.shape(concate)) np.savetxt(datafile, concate,", "numpy as np import matplotlib.pyplot as plt import pandas as pd import torch", "batch_size=batch_size) print('Number of Training samples is 
{}'.format(len(ftrTrain))) print('Number of Test samples is {}'.format(len(ftrTest)))", "open(name, 'a') as datafile: for j, (geometry, spectra) in enumerate(train_loader): concate = np.concatenate([geometry,", "np.multiply(np.power(w, 2), np.power(g, 2)))) e2 = np.divide(np.multiply(np.power(wp, 2), np.multiply(w, g)), np.add(np.power(np.add(np.power(w0, 2), -np.power(w,", "= lbl self.bool_train = bool_train self.len = len(ftr) def __len__(self): return self.len def", "class MetaMaterialDataSet(Dataset): \"\"\" The Meta Material Dataset Class \"\"\" def __init__(self, ftr, lbl,", "1) h_space = np.linspace(h_min, h_max, space + 1) # Shuffles r,h arrays each", "bool_train=False) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size) print('Number of Training samples", "with open(name, 'a') as datafile: for j, (geometry, spectra) in enumerate(train_loader): concate =", "+ 1) # Shuffles r,h arrays each iteration and then selects 0th element", "samples is {}'.format(len(ftrTest))) return train_loader, test_loader def gen_data(name): train_loader, test_loader = Prepare_Data(1, 10000,", "100 # Defines hypergeometric space of parameters to choose from space = 10", "== \"__main__\": train_loader, test_loader = Prepare_Data(1, 10000, 1000) with open('toy_data/mm1d_6.csv', 'a') as datafile:", "/ num_freq) # e1 = np.divide(np.multiply(np.power(wp, 2), np.add(np.power(w0, 2), -np.power(w, 2))), # np.add(np.power(np.add(np.power(w0,", "labels.size / sets print('Size of Features is %i, Size of Labels is %i'", "r_max = 200 h_min = 20 h_max = 100 # Defines hypergeometric space", "Class \"\"\" def __init__(self, ftr, lbl, bool_train): \"\"\" Instantiate the Dataset Object :param", "lblsize)) print('There are %i datasets:' % sets) ftrTrain, ftrTest, lblTrain, lblTest = train_test_split(features,", "geometry and spectra def Make_MM_Model(n): r, h = MM_Geom(n) spectra = np.zeros(300) geom", "of Labels is %i' % (ftrsize, lblsize)) print('There are %i datasets:' % sets)", "range(n): w0 = 100 / h[i] wp = (1 / 100) * np.sqrt(np.pi)", "test_loader = Prepare_Data(1, 10000, 1000) with open(name, 'a') as datafile: for j, (geometry,", "space + 1) # Shuffles r,h arrays each iteration and then selects 0th", "Omar's code # Make geometry samples def MM_Geom(n): # Parameter bounds for metamaterial", "__getitem__(self, ind): return self.ftr[ind, :], self.lbl[ind, :] ## Copied from Omar's code #", "Defines hypergeometric space of parameters to choose from space = 10 r_space =", "= np.concatenate([geometry, spectra], axis=1) # print(np.shape(concate)) np.savetxt(datafile, concate, delimiter=',') if __name__ == \"__main__\":", "batch_size): features = [] labels = [] for i in range(sets): geom, spectra", ":], self.lbl[ind, :] ## Copied from Omar's code # Make geometry samples def", "np.multiply(w, g)), np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2), np.multiply(np.power(w, 2), np.power(g, 2)))) return w,", "train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size) print('Number of Training samples is", "torch.utils.data.DataLoader(test_data, batch_size=batch_size) print('Number of Training samples is {}'.format(len(ftrTrain))) print('Number of Test samples is", "(ftrsize, lblsize)) print('There are %i datasets:' % sets) ftrTrain, ftrTest, lblTrain, lblTest =", "return self.len def __getitem__(self, ind): return 
self.ftr[ind, :], self.lbl[ind, :] ## Copied from", "# Make geometry and spectra def Make_MM_Model(n): r, h = MM_Geom(n) spectra =", "__init__(self, ftr, lbl, bool_train): \"\"\" Instantiate the Dataset Object :param ftr: the features", "ftr self.lbl = lbl self.bool_train = bool_train self.len = len(ftr) def __len__(self): return", "j, (geometry, spectra) in enumerate(train_loader): concate = np.concatenate([geometry, spectra], axis=1) # print(np.shape(concate)) np.savetxt(datafile,", "for metamaterial radius and height r_min = 20 r_max = 200 h_min =", "{}'.format(len(ftrTrain))) print('Number of Test samples is {}'.format(len(ftrTest))) return train_loader, test_loader def gen_data(name): train_loader,", "Lorentzian(w0, wp, g) spectra += e2 return geom, spectra # Calculate Lorentzian function", "features which is always the Geometry !! :param lbl: the labels, which is", "__name__ == \"__main__\": train_loader, test_loader = Prepare_Data(1, 10000, 1000) with open('toy_data/mm1d_6.csv', 'a') as", "for i in range(n): w0 = 100 / h[i] wp = (1 /", "self.lbl = lbl self.bool_train = bool_train self.len = len(ftr) def __len__(self): return self.len", "ftr: the features which is always the Geometry !! :param lbl: the labels,", "r, h # Make geometry and spectra def Make_MM_Model(n): r, h = MM_Geom(n)", "g)), np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2), np.multiply(np.power(w, 2), np.power(g, 2)))) return w, e2", "spectra += e2 return geom, spectra # Calculate Lorentzian function to get spectra", "for i in range(sets): geom, spectra = Make_MM_Model(osc) features.append(geom) labels.append(spectra) features = np.array(features,", "Prepare_Data(osc, sets, batch_size): features = [] labels = [] for i in range(sets):", "= 5 num_freq = 300 w = np.arange(freq_low, freq_high, (freq_high - freq_low) /", "for j, (geometry, spectra) in enumerate(train_loader): concate = np.concatenate([geometry, spectra], axis=1) #print(np.shape(concate)) np.savetxt(datafile,", "is %i, Size of Labels is %i' % (ftrsize, lblsize)) print('There are %i", "100 / h[i] wp = (1 / 100) * np.sqrt(np.pi) * r[i] g", "sklearn.model_selection import train_test_split # Define the class for the Meta-material dataset class MetaMaterialDataSet(Dataset):", "\"\"\" Instantiate the Dataset Object :param ftr: the features which is always the", "def MM_Geom(n): # Parameter bounds for metamaterial radius and height r_min = 20", "e1 = np.divide(np.multiply(np.power(wp, 2), np.add(np.power(w0, 2), -np.power(w, 2))), # np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)),", "delimiter=',') if __name__ == \"__main__\": train_loader, test_loader = Prepare_Data(1, 10000, 1000) with open('toy_data/mm1d_6.csv',", "which is always the Geometry !! 
:param lbl: the labels, which is always", "## Copied from Omar's code # Make geometry samples def MM_Geom(n): # Parameter", "spectra def Make_MM_Model(n): r, h = MM_Geom(n) spectra = np.zeros(300) geom = np.concatenate((r,", "features.size / sets lblsize = labels.size / sets print('Size of Features is %i,", "np.arange(freq_low, freq_high, (freq_high - freq_low) / num_freq) # e1 = np.divide(np.multiply(np.power(wp, 2), np.add(np.power(w0,", "= np.linspace(r_min, r_max, space + 1) h_space = np.linspace(h_min, h_max, space + 1)", "= torch.utils.data.DataLoader(test_data, batch_size=batch_size) print('Number of Training samples is {}'.format(len(ftrTrain))) print('Number of Test samples", "= (1 / 1000) * np.sqrt(np.pi) * r[i] w, e2 = Lorentzian(w0, wp,", "w, e2 = Lorentzian(w0, wp, g) spectra += e2 return geom, spectra #", "random n x n parameter set r, h = np.zeros(n, dtype=float), np.zeros(n, dtype=float)", "x n parameter set r, h = np.zeros(n, dtype=float), np.zeros(n, dtype=float) for i", "return w, e2 # Generates randomized dataset of simulated spectra for training and", "and height r_min = 20 r_max = 200 h_min = 20 h_max =", "w = np.arange(freq_low, freq_high, (freq_high - freq_low) / num_freq) # e1 = np.divide(np.multiply(np.power(wp,", "return train_loader, test_loader def gen_data(name): train_loader, test_loader = Prepare_Data(1, 10000, 1000) with open(name,", "= np.array(labels, dtype='float32') ftrsize = features.size / sets lblsize = labels.size / sets", "print(np.shape(concate)) np.savetxt(datafile, concate, delimiter=',') if __name__ == \"__main__\": train_loader, test_loader = Prepare_Data(1, 10000,", "choose from space = 10 r_space = np.linspace(r_min, r_max, space + 1) h_space", "code # Make geometry samples def MM_Geom(n): # Parameter bounds for metamaterial radius", "%i, Size of Labels is %i' % (ftrsize, lblsize)) print('There are %i datasets:'", "as pd import torch from torch.utils.data import Dataset from sklearn.model_selection import train_test_split #", "features.append(geom) labels.append(spectra) features = np.array(features, dtype='float32') labels = np.array(labels, dtype='float32') ftrsize = features.size", "-np.power(w, 2)), 2), np.multiply(np.power(w, 2), np.power(g, 2)))) return w, e2 # Generates randomized", "= 200 h_min = 20 h_max = 100 # Defines hypergeometric space of", "= Prepare_Data(1, 10000, 1000) with open('toy_data/mm1d_6.csv', 'a') as datafile: for j, (geometry, spectra)", "function to get spectra def Lorentzian(w0, wp, g): freq_low = 0 freq_high =", "Make geometry samples def MM_Geom(n): # Parameter bounds for metamaterial radius and height", "= 0 freq_high = 5 num_freq = 300 w = np.arange(freq_low, freq_high, (freq_high", "the Dataset Object :param ftr: the features which is always the Geometry !!", "# Define the class for the Meta-material dataset class MetaMaterialDataSet(Dataset): \"\"\" The Meta", "MetaMaterialDataSet(ftrTrain, lblTrain, bool_train=True) test_data = MetaMaterialDataSet(ftrTest, lblTest, bool_train=False) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size) test_loader", "lblTrain, bool_train=True) test_data = MetaMaterialDataSet(ftrTest, lblTest, bool_train=False) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size) test_loader =", "import numpy as np import matplotlib.pyplot as plt import pandas as pd import", "= labels.size / sets print('Size of Features is %i, Size of Labels is", "r_max, space + 1) h_space = np.linspace(h_min, h_max, space + 1) # Shuffles", "dataset of 
simulated spectra for training and testing def Prepare_Data(osc, sets, batch_size): features", "= train_test_split(features, labels, test_size=0.2, random_state=1234) train_data = MetaMaterialDataSet(ftrTrain, lblTrain, bool_train=True) test_data = MetaMaterialDataSet(ftrTest,", "h_space = np.linspace(h_min, h_max, space + 1) # Shuffles r,h arrays each iteration", "(freq_high - freq_low) / num_freq) # e1 = np.divide(np.multiply(np.power(wp, 2), np.add(np.power(w0, 2), -np.power(w,", "np.add(np.power(w0, 2), -np.power(w, 2))), # np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2), # np.multiply(np.power(w, 2),", "self.bool_train = bool_train self.len = len(ftr) def __len__(self): return self.len def __getitem__(self, ind):", "open('toy_data/mm1d_6.csv', 'a') as datafile: for j, (geometry, spectra) in enumerate(train_loader): concate = np.concatenate([geometry,", "for the Meta-material dataset class MetaMaterialDataSet(Dataset): \"\"\" The Meta Material Dataset Class \"\"\"", "% sets) ftrTrain, ftrTest, lblTrain, lblTest = train_test_split(features, labels, test_size=0.2, random_state=1234) train_data =", "test_loader def gen_data(name): train_loader, test_loader = Prepare_Data(1, 10000, 1000) with open(name, 'a') as", "bool_train: \"\"\" self.ftr = ftr self.lbl = lbl self.bool_train = bool_train self.len =", "lblTest = train_test_split(features, labels, test_size=0.2, random_state=1234) train_data = MetaMaterialDataSet(ftrTrain, lblTrain, bool_train=True) test_data =", "space = 10 r_space = np.linspace(r_min, r_max, space + 1) h_space = np.linspace(h_min,", "selects 0th element to generate random n x n parameter set r, h", "e2 return geom, spectra # Calculate Lorentzian function to get spectra def Lorentzian(w0,", "MM_Geom(n): # Parameter bounds for metamaterial radius and height r_min = 20 r_max", "axis=1) # print(np.shape(concate)) np.savetxt(datafile, concate, delimiter=',') if __name__ == \"__main__\": train_loader, test_loader =", "r[i] = r_space[0] h[i] = h_space[0] return r, h # Make geometry and", ":param ftr: the features which is always the Geometry !! :param lbl: the", "np.random.shuffle(h_space) r[i] = r_space[0] h[i] = h_space[0] return r, h # Make geometry", "randomized dataset of simulated spectra for training and testing def Prepare_Data(osc, sets, batch_size):", "height r_min = 20 r_max = 200 h_min = 20 h_max = 100", "the labels, which is always the Spectra !! :param bool_train: \"\"\" self.ftr =", "test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size) print('Number of Training samples is {}'.format(len(ftrTrain))) print('Number of Test", "is {}'.format(len(ftrTest))) return train_loader, test_loader def gen_data(name): train_loader, test_loader = Prepare_Data(1, 10000, 1000)", "* r[i] w, e2 = Lorentzian(w0, wp, g) spectra += e2 return geom,", "is %i' % (ftrsize, lblsize)) print('There are %i datasets:' % sets) ftrTrain, ftrTest,", "Dataset Object :param ftr: the features which is always the Geometry !! 
:param", "2), -np.power(w, 2)), 2), np.multiply(np.power(w, 2), np.power(g, 2)))) return w, e2 # Generates", "[] labels = [] for i in range(sets): geom, spectra = Make_MM_Model(osc) features.append(geom)", "get spectra def Lorentzian(w0, wp, g): freq_low = 0 freq_high = 5 num_freq", "= [] labels = [] for i in range(sets): geom, spectra = Make_MM_Model(osc)", "Size of Labels is %i' % (ftrsize, lblsize)) print('There are %i datasets:' %", "freq_low) / num_freq) # e1 = np.divide(np.multiply(np.power(wp, 2), np.add(np.power(w0, 2), -np.power(w, 2))), #", "r[i] w, e2 = Lorentzian(w0, wp, g) spectra += e2 return geom, spectra", "-np.power(w, 2))), # np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2), # np.multiply(np.power(w, 2), np.power(g, 2))))", ":param bool_train: \"\"\" self.ftr = ftr self.lbl = lbl self.bool_train = bool_train self.len", "if __name__ == \"__main__\": train_loader, test_loader = Prepare_Data(1, 10000, 1000) with open('toy_data/mm1d_6.csv', 'a')", "features = np.array(features, dtype='float32') labels = np.array(labels, dtype='float32') ftrsize = features.size / sets", "bool_train=True) test_data = MetaMaterialDataSet(ftrTest, lblTest, bool_train=False) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size) test_loader = torch.utils.data.DataLoader(test_data,", "dtype=float) for i in range(n): np.random.shuffle(r_space) np.random.shuffle(h_space) r[i] = r_space[0] h[i] = h_space[0]", "return self.ftr[ind, :], self.lbl[ind, :] ## Copied from Omar's code # Make geometry", "spectra # Calculate Lorentzian function to get spectra def Lorentzian(w0, wp, g): freq_low", "in range(n): w0 = 100 / h[i] wp = (1 / 100) *", "lblTest, bool_train=False) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size) print('Number of Training", "batch_size=batch_size) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size) print('Number of Training samples is {}'.format(len(ftrTrain))) print('Number of", "r[i] g = (1 / 1000) * np.sqrt(np.pi) * r[i] w, e2 =", "train_test_split # Define the class for the Meta-material dataset class MetaMaterialDataSet(Dataset): \"\"\" The", "lbl: the labels, which is always the Spectra !! :param bool_train: \"\"\" self.ftr", "is always the Spectra !! :param bool_train: \"\"\" self.ftr = ftr self.lbl =", "freq_high, (freq_high - freq_low) / num_freq) # e1 = np.divide(np.multiply(np.power(wp, 2), np.add(np.power(w0, 2),", "to get spectra def Lorentzian(w0, wp, g): freq_low = 0 freq_high = 5", "= 300 w = np.arange(freq_low, freq_high, (freq_high - freq_low) / num_freq) # e1", "concate = np.concatenate([geometry, spectra], axis=1) # print(np.shape(concate)) np.savetxt(datafile, concate, delimiter=',') if __name__ ==", "h_min = 20 h_max = 100 # Defines hypergeometric space of parameters to", "r_space = np.linspace(r_min, r_max, space + 1) h_space = np.linspace(h_min, h_max, space +", "0 freq_high = 5 num_freq = 300 w = np.arange(freq_low, freq_high, (freq_high -", "\"\"\" The Meta Material Dataset Class \"\"\" def __init__(self, ftr, lbl, bool_train): \"\"\"", "print('Number of Training samples is {}'.format(len(ftrTrain))) print('Number of Test samples is {}'.format(len(ftrTest))) return", "the features which is always the Geometry !! :param lbl: the labels, which" ]
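# The Lorentzian() helper above evaluates the imaginary part of a single
# Lorentz-oscillator permittivity,
#     e2(w) = wp**2 * w * g / ((w0**2 - w**2)**2 + w**2 * g**2),
# and Make_MM_Model() sums one such resonance per oscillator. The following
# is a minimal sketch that plots one randomly generated single-oscillator
# spectrum; the helper name plot_example_spectrum is hypothetical, and it
# assumes the definitions above are in scope.
def plot_example_spectrum():
    """Plot one randomly generated single-oscillator spectrum."""
    geom, spectra = Make_MM_Model(1)   # geom = [r, h] for one oscillator
    w = np.arange(0, 5, 5 / 300)       # same frequency grid as Lorentzian()
    plt.plot(w, spectra)
    plt.xlabel('frequency (arbitrary units)')
    plt.ylabel('e2')
    plt.title('r = %.0f, h = %.0f' % (geom[0], geom[1]))
    plt.show()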
<filename>test/core_tests/test_params_object.py
import talos as ta


def test_params_object():
    '''Tests the object from Params()'''

    print('Start testing Params object...')

    p = ta.Params()

    # without arguments
    p.activations()
    p.batch_size()
    p.dropout()
    p.epochs()
    p.kernel_initializers()
    p.layers()
    p.neurons()
    p.lr()
    p.optimizers()
    p.shapes()
    p.shapes_slope()
    p.automated()

    p = ta.Params(replace=False)

    # with arguments
    p.activations()
    p.batch_size(10, 100, 5)
    p.dropout()
    p.epochs(10, 100, 5)
    p.kernel_initializers()
    p.layers(12)
    p.neurons(10, 100, 5)
    p.lr()
    p.optimizers('multi_label')
    p.shapes()
    p.shapes_slope()
    p.automated('sloped')

    return "Finished testing Params object!"
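# The test simply exercises every Params() setter twice, once with defaults
# and once with explicit arguments, checking that none of them raises. A
# minimal sketch of running it by hand; pytest would normally collect
# test_params_object() itself, so this direct-run guard is only an assumed
# convenience, not part of the suite's conventions.
if __name__ == '__main__':
    print(test_params_object())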
"""
Class To Load and Sync two files: Thermocouple and FBG

Copyright (C) <NAME> 2015 DTU Wind Energy

Author: <NAME>
Email: <EMAIL>; <EMAIL>
Last revision: 02-08-2016

***License***:
This file is part of FBG_ReStrain.

FBG_ReStrain is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

FBG_ReStrain is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with FBG_ReStrain. If not, see <http://www.gnu.org/licenses/>
"""
# Packages
import pandas as pd
import datetime as dt


class FBG_Temp_Loading(object):
    def __init__(self, TempPaths, TempFileNum, TempSkipRows, TempSep,
                 TempColNumb, TempColNames, FBGPaths, FBGFileNum, FBGSkipRows,
                 FBGSep, FBGColNumb, FBGColNames, TimeCorrect=0):
        """
        Initialize the FBG and Temp Loading class

        Input_Parameters:
        ----------
        """
        # Temp input
        self.TempPaths = TempPaths
        self.TempFileNum = TempFileNum
        self.TempSkipRows = TempSkipRows  # Skip to the begin of the data (skip header)
        self.TempSep = TempSep  # Data separator
        self.TempColNumb = TempColNumb
        self.TempColNames = TempColNames
        self.TimeCorrect = TimeCorrect

        # FBG input
        self.FBGPaths = FBGPaths
        self.FBGFileNum = FBGFileNum
        self.FBGSkipRows = FBGSkipRows  # Skip to the begin of the data (skip header)
        self.FBGSep = FBGSep  # Data separator
        self.FBGColNumb = FBGColNumb
        self.FBGColNames = FBGColNames

        # Load files
        # Temp
        self.TempData = pd.DataFrame()
        for i in range(0, self.TempFileNum):
            TempPathtemp = self.TempPaths[i].replace("\\", "/", 99)
            TempDataTemp = pd.read_csv(TempPathtemp, sep=self.TempSep,
                                       names=self.TempColNames,
                                       skiprows=self.TempSkipRows,
                                       parse_dates=[['Date', 'Time']],
                                       dayfirst=True)  # Merge Date and Hour
            self.TempData = pd.concat([self.TempData, TempDataTemp],
                                      ignore_index=True)
        # Correct time
        self.TempData['Date_Time'] = (self.TempData['Date_Time']
                                      + dt.timedelta(hours=self.TimeCorrect))
        # Use the Date_Time timestamp as index
        self.TempData = self.TempData.set_index('Date_Time')

        # FBG
        self.FBGData = pd.DataFrame()
        for i in range(0, self.FBGFileNum):
            FBGPathtemp = self.FBGPaths[i].replace("\\", "/", 99)
            FBGDataTemp = pd.read_csv(FBGPathtemp, sep=self.FBGSep,
                                      names=self.FBGColNames,
                                      skiprows=self.FBGSkipRows,
                                      parse_dates=[['Date', 'Time']],
                                      dayfirst=True)  # Merge Date and Hour
            self.FBGData = pd.concat([self.FBGData, FBGDataTemp],
                                     ignore_index=True)
        # Delete column Sample
        self.FBGData = self.FBGData.drop('Sample', axis=1)
        # Organize index
        self.FBGData = self.FBGData.set_index('Date_Time')

    def Syncron(self):
        """
        Synchronize the FBG files and Temp files
        ----------
        """
        self.SyncData = pd.concat([self.FBGData, self.TempData],
                                  axis=1, join='inner')
        # Create column with an increment/sample series
        # self.SyncData['Increment']=pd.Series(range(0,len(self.SyncData)), index=self.SyncData.index)
        self.SyncData.insert(0, 'Increment/Sample',
                             pd.Series(range(0, len(self.SyncData)),
                                       index=self.SyncData.index))


# Commands used to run this file without the GUI
"""
TempPath=['C:\\Users\\gfpe\\Desktop\\Example_Temp_File\\sync\\temp.csv']
TempFileNum=1
TempColNumb=5
TempColNames=['Date','Time','Temp1','Temp2','Temp3']
TempSkipRows=1
TempSep=';'
FBGPaths=['C:\\Users\\gfpe\\Desktop\\Example_Temp_File\\Sync\\BM Data [2015.07.16.09.31.02 ; 2015.07.16.10.31.01].txt',
          'C:\\Users\\gfpe\\Desktop\\Example_Temp_File\\Sync\\BM Data [2015.07.16.10.31.02 ; 2015.07.16.11.31.01].txt',
          'C:\\Users\\gfpe\\Desktop\\Example_Temp_File\\Sync\\BM Data [2015.07.16.11.31.02 ; 2015.07.16.12.31.01].txt']
FBGFileNum=3
FBGColNumb=5
FBGColNames=['Date','Time','Sample','FBG1','FBG2']
FBGSkipRows=2
FBGSep='\t'
test=FBG_Temp_Loading(TempPath,TempFileNum,TempSkipRows,TempSep,TempColNumb,TempColNames,
                      FBGPaths,FBGFileNum,FBGSkipRows,FBGSep,FBGColNumb,FBGColNames)
#test.FBGData['FBG1'].plot()
test.Syncron()
#To save
test.FBGData.to_csv('C:\\Users\\gfpe\\Desktop\\FBG.csv', sep=';')
test.TempData.to_csv('C:\\Users\\gfpe\\Desktop\\temp.csv', sep=';')
test.SyncData.to_csv('C:\\Users\\gfpe\\Desktop\\res.csv', sep=';')
"""
You", "Hour self.FBGData= pd.concat([self.FBGData,FBGDataTemp],ignore_index=True) #Delete collum Sample self.FBGData=self.FBGData.drop('Sample',1) #Organixe index self.FBGData=self.FBGData.set_index('Date_Time') def Syncron(self): \"\"\"", "the Free Software Foundation, either version 3 of the License, or (at your", "self.FBGData=self.FBGData.set_index('Date_Time') def Syncron(self): \"\"\" Syncronize the FBG files and Temp files ---------- \"\"\"", "License along with Foobar. If not, see <http://www.gnu.org/licenses/> \"\"\" #Packages import pandas as", "or (at your option) any later version. FBG_ReStrain is distributed in the hope", "# Data separador self.FBGColNumb=FBGColNumb self.FBGColNames=FBGColNames #Load File #Temp self.TempData=pd.DataFrame() for i in range", "timestamp self.TempData=self.TempData.set_index('Date_Time') #FBG self.FBGData=pd.DataFrame() for i in range(0,self.FBGFileNum): FBGPathtemp=self.FBGPaths[i].replace(\"\\\\\",\"/\",99) FBGDataTemp=pd.read_csv(FBGPathtemp,sep=self.FBGSep,names=self.FBGColNames,skiprows=self.FBGSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True) #Merge Date", "range(0,self.FBGFileNum): FBGPathtemp=self.FBGPaths[i].replace(\"\\\\\",\"/\",99) FBGDataTemp=pd.read_csv(FBGPathtemp,sep=self.FBGSep,names=self.FBGColNames,skiprows=self.FBGSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True) #Merge Date and Hour self.FBGData= pd.concat([self.FBGData,FBGDataTemp],ignore_index=True) #Delete collum Sample", "TempSep=';' FBGPaths=['C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.09.31.02 ; 2015.07.16.10.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.10.31.02 ; 2015.07.16.11.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.11.31.02 ;", "of FBG_ReStrain. FBG_ReStrain is free software: you can redistribute it and/or modify it", "the Data (skip head) self.TempSep=TempSep # Data separador self.TempColNumb=TempColNumb self.TempColNames=TempColNames self.TimeCorrect=TimeCorrect #FBG Input", "(C) <NAME> 2015 DTU Wind Energy Author: <NAME> Email: <EMAIL>; <EMAIL> Last revision:", "You should have received a copy of the GNU General Public License along", "Data [2015.07.16.10.31.02 ; 2015.07.16.11.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.11.31.02 ; 2015.07.16.12.31.01].txt'] FBGFileNum=3 FBGColNumb=5 FBGColNames=['Date','Time','Sample','FBG1','FBG2'] FBGSkipRows=2 FBGSep='\\t'", "PURPOSE. See the GNU General Public License for more details. You should have", "pd.concat([self.FBGData,FBGDataTemp],ignore_index=True) #Delete collum Sample self.FBGData=self.FBGData.drop('Sample',1) #Organixe index self.FBGData=self.FBGData.set_index('Date_Time') def Syncron(self): \"\"\" Syncronize the", "and Sync two files: Thermocouple and FBG Copyright (C) <NAME> 2015 DTU Wind", "self.TempData=self.TempData.set_index('Date_Time') #FBG self.FBGData=pd.DataFrame() for i in range(0,self.FBGFileNum): FBGPathtemp=self.FBGPaths[i].replace(\"\\\\\",\"/\",99) FBGDataTemp=pd.read_csv(FBGPathtemp,sep=self.FBGSep,names=self.FBGColNames,skiprows=self.FBGSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True) #Merge Date and", "and Hour self.FBGData= pd.concat([self.FBGData,FBGDataTemp],ignore_index=True) #Delete collum Sample self.FBGData=self.FBGData.drop('Sample',1) #Organixe index self.FBGData=self.FBGData.set_index('Date_Time') def Syncron(self):", "of the GNU General Public License along with Foobar. 
If not, see <http://www.gnu.org/licenses/>", "file without the GUI \"\"\" TempPath=['C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\sync\\\\temp.csv'] TempFileNum=1 TempColNumb=5 TempColNames=['Date','Time','Temp1','Temp2','Temp3'] TempSkipRows=1 TempSep=';' FBGPaths=['C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data", "index=self.SyncData.index)) #Commands used to run this file without the GUI \"\"\" TempPath=['C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\sync\\\\temp.csv'] TempFileNum=1", "dt class FBG_Temp_Loading(object): def __init__(self,TempPaths,TempFileNum,TempSkipRows,TempSep,TempColNumb,TempColNames,FBGPaths,FBGFileNum,FBGSkipRows,FBGSep,FBGColNumb,FBGColNames,TimeCorrect=0): \"\"\" Initialized the FBG and Temp Loading Class", "Data (skip head) self.FBGSep=FBGSep # Data separador self.FBGColNumb=FBGColNumb self.FBGColNames=FBGColNames #Load File #Temp self.TempData=pd.DataFrame()", "any later version. FBG_ReStrain is distributed in the hope that it will be", "head) self.FBGSep=FBGSep # Data separador self.FBGColNumb=FBGColNumb self.FBGColNames=FBGColNames #Load File #Temp self.TempData=pd.DataFrame() for i", "File #Temp self.TempData=pd.DataFrame() for i in range (0,self.TempFileNum): TempPathtemp=self.TempPaths[i].replace(\"\\\\\",\"/\",99) TempDataTemp=pd.read_csv(TempPathtemp,sep=self.TempSep,names=self.TempColNames,skiprows=self.TempSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True) #Merge Date", "(0,self.TempFileNum): TempPathtemp=self.TempPaths[i].replace(\"\\\\\",\"/\",99) TempDataTemp=pd.read_csv(TempPathtemp,sep=self.TempSep,names=self.TempColNames,skiprows=self.TempSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True) #Merge Date and Hour self.TempData=pd.concat([self.TempData,TempDataTemp],ignore_index=True) #Correct Time self.TempData['Date_Time']=self.TempData['Date_Time']+dt.timedelta(hours=self.TimeCorrect) #Date_Time", "Foobar. If not, see <http://www.gnu.org/licenses/> \"\"\" #Packages import pandas as pd import datetime", "published by the Free Software Foundation, either version 3 of the License, or", "hope that it will be useful, but WITHOUT ANY WARRANTY; without even the", "of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public", "the terms of the GNU General Public License as published by the Free", "Time self.TempData['Date_Time']=self.TempData['Date_Time']+dt.timedelta(hours=self.TimeCorrect) #Date_Time timestamp self.TempData=self.TempData.set_index('Date_Time') #FBG self.FBGData=pd.DataFrame() for i in range(0,self.FBGFileNum): FBGPathtemp=self.FBGPaths[i].replace(\"\\\\\",\"/\",99) FBGDataTemp=pd.read_csv(FBGPathtemp,sep=self.FBGSep,names=self.FBGColNames,skiprows=self.FBGSkipRows,parse_dates=[['Date',", "Sample self.FBGData=self.FBGData.drop('Sample',1) #Organixe index self.FBGData=self.FBGData.set_index('Date_Time') def Syncron(self): \"\"\" Syncronize the FBG files and", "; 2015.07.16.10.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.10.31.02 ; 2015.07.16.11.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.11.31.02 ; 2015.07.16.12.31.01].txt'] FBGFileNum=3 FBGColNumb=5 FBGColNames=['Date','Time','Sample','FBG1','FBG2']", "copy of the GNU General Public License along with Foobar. 
If not, see", "Hour self.TempData=pd.concat([self.TempData,TempDataTemp],ignore_index=True) #Correct Time self.TempData['Date_Time']=self.TempData['Date_Time']+dt.timedelta(hours=self.TimeCorrect) #Date_Time timestamp self.TempData=self.TempData.set_index('Date_Time') #FBG self.FBGData=pd.DataFrame() for i in", "\"\"\" Initialized the FBG and Temp Loading Class file Input_Parameters: ---------- \"\"\" #Temp", "index=self.SyncData.index) self.SyncData.insert(0,'Increment/Sample',pd.Series(range(0,len(self.SyncData)), index=self.SyncData.index)) #Commands used to run this file without the GUI \"\"\"", "FBGPaths=['C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.09.31.02 ; 2015.07.16.10.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.10.31.02 ; 2015.07.16.11.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.11.31.02 ; 2015.07.16.12.31.01].txt']", "2015.07.16.12.31.01].txt'] FBGFileNum=3 FBGColNumb=5 FBGColNames=['Date','Time','Sample','FBG1','FBG2'] FBGSkipRows=2 FBGSep='\\t' test=FBG_Temp_Loading(TempPath,TempFileNum,TempSkipRows,TempSep,TempColNumb,TempColNames,FBGPaths,FBGFileNum,FBGSkipRows,FBGSep,FBGColNumb,FBGColNames) #test.FBGData['FBG1'].plot() test.Syncron() #To save test.FBGData.to_csv('C:\\\\Users\\\\gfpe\\\\Desktop\\\\FBG.csv', sep=';')", "see <http://www.gnu.org/licenses/> \"\"\" #Packages import pandas as pd import datetime as dt class", "TempPathtemp=self.TempPaths[i].replace(\"\\\\\",\"/\",99) TempDataTemp=pd.read_csv(TempPathtemp,sep=self.TempSep,names=self.TempColNames,skiprows=self.TempSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True) #Merge Date and Hour self.TempData=pd.concat([self.TempData,TempDataTemp],ignore_index=True) #Correct Time self.TempData['Date_Time']=self.TempData['Date_Time']+dt.timedelta(hours=self.TimeCorrect) #Date_Time timestamp", "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License", "GUI \"\"\" TempPath=['C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\sync\\\\temp.csv'] TempFileNum=1 TempColNumb=5 TempColNames=['Date','Time','Temp1','Temp2','Temp3'] TempSkipRows=1 TempSep=';' FBGPaths=['C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.09.31.02 ; 2015.07.16.10.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM", "02-08-2016 ***License***: This file is part of FBG_ReStrain. 
FBG_ReStrain is free software: you", "2015.07.16.10.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.10.31.02 ; 2015.07.16.11.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.11.31.02 ; 2015.07.16.12.31.01].txt'] FBGFileNum=3 FBGColNumb=5 FBGColNames=['Date','Time','Sample','FBG1','FBG2'] FBGSkipRows=2", "serie #self.SyncData['Increment']=pd.Series(range(0,len(self.SyncData)), index=self.SyncData.index) self.SyncData.insert(0,'Increment/Sample',pd.Series(range(0,len(self.SyncData)), index=self.SyncData.index)) #Commands used to run this file without the", "#Temp self.TempData=pd.DataFrame() for i in range (0,self.TempFileNum): TempPathtemp=self.TempPaths[i].replace(\"\\\\\",\"/\",99) TempDataTemp=pd.read_csv(TempPathtemp,sep=self.TempSep,names=self.TempColNames,skiprows=self.TempSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True) #Merge Date and", "Wind Energy Author: <NAME> Email: <EMAIL>; <EMAIL> Last revision: 02-08-2016 ***License***: This file", "head) self.TempSep=TempSep # Data separador self.TempColNumb=TempColNumb self.TempColNames=TempColNames self.TimeCorrect=TimeCorrect #FBG Input self.FBGPaths=FBGPaths self.FBGFileNum=FBGFileNum self.FBGSkipRows=FBGSkipRows#Skip", "self.FBGFileNum=FBGFileNum self.FBGSkipRows=FBGSkipRows#Skip to the begin of the Data (skip head) self.FBGSep=FBGSep # Data", "\"\"\" self.SyncData= pd.concat([self.FBGData,self.TempData],axis=1, join='inner') #Create collum with increment/sample serie #self.SyncData['Increment']=pd.Series(range(0,len(self.SyncData)), index=self.SyncData.index) self.SyncData.insert(0,'Increment/Sample',pd.Series(range(0,len(self.SyncData)), index=self.SyncData.index))", "Date and Hour self.TempData=pd.concat([self.TempData,TempDataTemp],ignore_index=True) #Correct Time self.TempData['Date_Time']=self.TempData['Date_Time']+dt.timedelta(hours=self.TimeCorrect) #Date_Time timestamp self.TempData=self.TempData.set_index('Date_Time') #FBG self.FBGData=pd.DataFrame() for", "Software Foundation, either version 3 of the License, or (at your option) any", "for i in range(0,self.FBGFileNum): FBGPathtemp=self.FBGPaths[i].replace(\"\\\\\",\"/\",99) FBGDataTemp=pd.read_csv(FBGPathtemp,sep=self.FBGSep,names=self.FBGColNames,skiprows=self.FBGSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True) #Merge Date and Hour self.FBGData= pd.concat([self.FBGData,FBGDataTemp],ignore_index=True)", "a copy of the GNU General Public License along with Foobar. 
If not,", "self.TempColNumb=TempColNumb self.TempColNames=TempColNames self.TimeCorrect=TimeCorrect #FBG Input self.FBGPaths=FBGPaths self.FBGFileNum=FBGFileNum self.FBGSkipRows=FBGSkipRows#Skip to the begin of the", "self.FBGColNames=FBGColNames #Load File #Temp self.TempData=pd.DataFrame() for i in range (0,self.TempFileNum): TempPathtemp=self.TempPaths[i].replace(\"\\\\\",\"/\",99) TempDataTemp=pd.read_csv(TempPathtemp,sep=self.TempSep,names=self.TempColNames,skiprows=self.TempSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True)", "To Load and Sync two files: Thermocouple and FBG Copyright (C) <NAME> 2015", "#Organixe index self.FBGData=self.FBGData.set_index('Date_Time') def Syncron(self): \"\"\" Syncronize the FBG files and Temp files", "pandas as pd import datetime as dt class FBG_Temp_Loading(object): def __init__(self,TempPaths,TempFileNum,TempSkipRows,TempSep,TempColNumb,TempColNames,FBGPaths,FBGFileNum,FBGSkipRows,FBGSep,FBGColNumb,FBGColNames,TimeCorrect=0): \"\"\" Initialized", "increment/sample serie #self.SyncData['Increment']=pd.Series(range(0,len(self.SyncData)), index=self.SyncData.index) self.SyncData.insert(0,'Increment/Sample',pd.Series(range(0,len(self.SyncData)), index=self.SyncData.index)) #Commands used to run this file without", "run this file without the GUI \"\"\" TempPath=['C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\sync\\\\temp.csv'] TempFileNum=1 TempColNumb=5 TempColNames=['Date','Time','Temp1','Temp2','Temp3'] TempSkipRows=1 TempSep=';'", "TempColNumb=5 TempColNames=['Date','Time','Temp1','Temp2','Temp3'] TempSkipRows=1 TempSep=';' FBGPaths=['C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.09.31.02 ; 2015.07.16.10.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.10.31.02 ; 2015.07.16.11.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM", "Syncronize the FBG files and Temp files ---------- \"\"\" self.SyncData= pd.concat([self.FBGData,self.TempData],axis=1, join='inner') #Create", "\"\"\" Python Class To Load and Sync two files: Thermocouple and FBG Copyright", "self.FBGSep=FBGSep # Data separador self.FBGColNumb=FBGColNumb self.FBGColNames=FBGColNames #Load File #Temp self.TempData=pd.DataFrame() for i in", "---------- \"\"\" self.SyncData= pd.concat([self.FBGData,self.TempData],axis=1, join='inner') #Create collum with increment/sample serie #self.SyncData['Increment']=pd.Series(range(0,len(self.SyncData)), index=self.SyncData.index) self.SyncData.insert(0,'Increment/Sample',pd.Series(range(0,len(self.SyncData)),", "without the GUI \"\"\" TempPath=['C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\sync\\\\temp.csv'] TempFileNum=1 TempColNumb=5 TempColNames=['Date','Time','Temp1','Temp2','Temp3'] TempSkipRows=1 TempSep=';' FBGPaths=['C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.09.31.02", "Author: <NAME> Email: <EMAIL>; <EMAIL> Last revision: 02-08-2016 ***License***: This file is part", "useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or", "either version 3 of the License, or (at your option) any later version.", "self.TempColNames=TempColNames self.TimeCorrect=TimeCorrect #FBG Input self.FBGPaths=FBGPaths self.FBGFileNum=FBGFileNum self.FBGSkipRows=FBGSkipRows#Skip to the begin of the Data", "index self.FBGData=self.FBGData.set_index('Date_Time') def Syncron(self): \"\"\" Syncronize the FBG files and Temp files ----------", "Thermocouple and FBG 
Copyright (C) <NAME> 2015 DTU Wind Energy Author: <NAME> Email:", "TempColNames=['Date','Time','Temp1','Temp2','Temp3'] TempSkipRows=1 TempSep=';' FBGPaths=['C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.09.31.02 ; 2015.07.16.10.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.10.31.02 ; 2015.07.16.11.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data", "#Temp Input self.TempPaths=TempPaths self.TempFileNum=TempFileNum self.TempSkipRows=TempSkipRows #Skip to the begin of the Data (skip", "Input self.FBGPaths=FBGPaths self.FBGFileNum=FBGFileNum self.FBGSkipRows=FBGSkipRows#Skip to the begin of the Data (skip head) self.FBGSep=FBGSep", "self.FBGSkipRows=FBGSkipRows#Skip to the begin of the Data (skip head) self.FBGSep=FBGSep # Data separador", "\"\"\" #Packages import pandas as pd import datetime as dt class FBG_Temp_Loading(object): def", "\"\"\" Syncronize the FBG files and Temp files ---------- \"\"\" self.SyncData= pd.concat([self.FBGData,self.TempData],axis=1, join='inner')", "is distributed in the hope that it will be useful, but WITHOUT ANY", "self.TempPaths=TempPaths self.TempFileNum=TempFileNum self.TempSkipRows=TempSkipRows #Skip to the begin of the Data (skip head) self.TempSep=TempSep", "redistribute it and/or modify it under the terms of the GNU General Public", "by the Free Software Foundation, either version 3 of the License, or (at", "option) any later version. FBG_ReStrain is distributed in the hope that it will", "is part of FBG_ReStrain. FBG_ReStrain is free software: you can redistribute it and/or", "<reponame>GilmarPereira/ReStrain \"\"\" Python Class To Load and Sync two files: Thermocouple and FBG", "***License***: This file is part of FBG_ReStrain. FBG_ReStrain is free software: you can", "you can redistribute it and/or modify it under the terms of the GNU", "later version. FBG_ReStrain is distributed in the hope that it will be useful,", "Public License for more details. You should have received a copy of the", "<NAME> Email: <EMAIL>; <EMAIL> Last revision: 02-08-2016 ***License***: This file is part of", "with Foobar. If not, see <http://www.gnu.org/licenses/> \"\"\" #Packages import pandas as pd import", "FBG_Temp_Loading(object): def __init__(self,TempPaths,TempFileNum,TempSkipRows,TempSep,TempColNumb,TempColNames,FBGPaths,FBGFileNum,FBGSkipRows,FBGSep,FBGColNumb,FBGColNames,TimeCorrect=0): \"\"\" Initialized the FBG and Temp Loading Class file Input_Parameters:", "#FBG Input self.FBGPaths=FBGPaths self.FBGFileNum=FBGFileNum self.FBGSkipRows=FBGSkipRows#Skip to the begin of the Data (skip head)", "TempDataTemp=pd.read_csv(TempPathtemp,sep=self.TempSep,names=self.TempColNames,skiprows=self.TempSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True) #Merge Date and Hour self.TempData=pd.concat([self.TempData,TempDataTemp],ignore_index=True) #Correct Time self.TempData['Date_Time']=self.TempData['Date_Time']+dt.timedelta(hours=self.TimeCorrect) #Date_Time timestamp self.TempData=self.TempData.set_index('Date_Time')", "Python Class To Load and Sync two files: Thermocouple and FBG Copyright (C)", "version 3 of the License, or (at your option) any later version. 
FBG_ReStrain", "as dt class FBG_Temp_Loading(object): def __init__(self,TempPaths,TempFileNum,TempSkipRows,TempSep,TempColNumb,TempColNames,FBGPaths,FBGFileNum,FBGSkipRows,FBGSep,FBGColNumb,FBGColNames,TimeCorrect=0): \"\"\" Initialized the FBG and Temp Loading", "and Hour self.TempData=pd.concat([self.TempData,TempDataTemp],ignore_index=True) #Correct Time self.TempData['Date_Time']=self.TempData['Date_Time']+dt.timedelta(hours=self.TimeCorrect) #Date_Time timestamp self.TempData=self.TempData.set_index('Date_Time') #FBG self.FBGData=pd.DataFrame() for i", "self.FBGPaths=FBGPaths self.FBGFileNum=FBGFileNum self.FBGSkipRows=FBGSkipRows#Skip to the begin of the Data (skip head) self.FBGSep=FBGSep #", "def __init__(self,TempPaths,TempFileNum,TempSkipRows,TempSep,TempColNumb,TempColNames,FBGPaths,FBGFileNum,FBGSkipRows,FBGSep,FBGColNumb,FBGColNames,TimeCorrect=0): \"\"\" Initialized the FBG and Temp Loading Class file Input_Parameters: ----------", "FBGDataTemp=pd.read_csv(FBGPathtemp,sep=self.FBGSep,names=self.FBGColNames,skiprows=self.FBGSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True) #Merge Date and Hour self.FBGData= pd.concat([self.FBGData,FBGDataTemp],ignore_index=True) #Delete collum Sample self.FBGData=self.FBGData.drop('Sample',1) #Organixe", "self.TempData=pd.DataFrame() for i in range (0,self.TempFileNum): TempPathtemp=self.TempPaths[i].replace(\"\\\\\",\"/\",99) TempDataTemp=pd.read_csv(TempPathtemp,sep=self.TempSep,names=self.TempColNames,skiprows=self.TempSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True) #Merge Date and Hour", "the GNU General Public License for more details. You should have received a", "[2015.07.16.10.31.02 ; 2015.07.16.11.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.11.31.02 ; 2015.07.16.12.31.01].txt'] FBGFileNum=3 FBGColNumb=5 FBGColNames=['Date','Time','Sample','FBG1','FBG2'] FBGSkipRows=2 FBGSep='\\t' test=FBG_Temp_Loading(TempPath,TempFileNum,TempSkipRows,TempSep,TempColNumb,TempColNames,FBGPaths,FBGFileNum,FBGSkipRows,FBGSep,FBGColNumb,FBGColNames)", "self.TempData=pd.concat([self.TempData,TempDataTemp],ignore_index=True) #Correct Time self.TempData['Date_Time']=self.TempData['Date_Time']+dt.timedelta(hours=self.TimeCorrect) #Date_Time timestamp self.TempData=self.TempData.set_index('Date_Time') #FBG self.FBGData=pd.DataFrame() for i in range(0,self.FBGFileNum):", "under the terms of the GNU General Public License as published by the", "be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY", "Energy Author: <NAME> Email: <EMAIL>; <EMAIL> Last revision: 02-08-2016 ***License***: This file is", "[2015.07.16.11.31.02 ; 2015.07.16.12.31.01].txt'] FBGFileNum=3 FBGColNumb=5 FBGColNames=['Date','Time','Sample','FBG1','FBG2'] FBGSkipRows=2 FBGSep='\\t' test=FBG_Temp_Loading(TempPath,TempFileNum,TempSkipRows,TempSep,TempColNumb,TempColNames,FBGPaths,FBGFileNum,FBGSkipRows,FBGSep,FBGColNumb,FBGColNames) #test.FBGData['FBG1'].plot() test.Syncron() #To save", "files ---------- \"\"\" self.SyncData= pd.concat([self.FBGData,self.TempData],axis=1, join='inner') #Create collum with increment/sample serie #self.SyncData['Increment']=pd.Series(range(0,len(self.SyncData)), index=self.SyncData.index)", "self.FBGData=pd.DataFrame() for i in range(0,self.FBGFileNum): FBGPathtemp=self.FBGPaths[i].replace(\"\\\\\",\"/\",99) FBGDataTemp=pd.read_csv(FBGPathtemp,sep=self.FBGSep,names=self.FBGColNames,skiprows=self.FBGSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True) #Merge Date 
and Hour self.FBGData=", "FBG Copyright (C) <NAME> 2015 DTU Wind Energy Author: <NAME> Email: <EMAIL>; <EMAIL>", "file is part of FBG_ReStrain. FBG_ReStrain is free software: you can redistribute it", "the begin of the Data (skip head) self.TempSep=TempSep # Data separador self.TempColNumb=TempColNumb self.TempColNames=TempColNames", "Data separador self.FBGColNumb=FBGColNumb self.FBGColNames=FBGColNames #Load File #Temp self.TempData=pd.DataFrame() for i in range (0,self.TempFileNum):", "join='inner') #Create collum with increment/sample serie #self.SyncData['Increment']=pd.Series(range(0,len(self.SyncData)), index=self.SyncData.index) self.SyncData.insert(0,'Increment/Sample',pd.Series(range(0,len(self.SyncData)), index=self.SyncData.index)) #Commands used to", "Loading Class file Input_Parameters: ---------- \"\"\" #Temp Input self.TempPaths=TempPaths self.TempFileNum=TempFileNum self.TempSkipRows=TempSkipRows #Skip to", "your option) any later version. FBG_ReStrain is distributed in the hope that it", "(skip head) self.FBGSep=FBGSep # Data separador self.FBGColNumb=FBGColNumb self.FBGColNames=FBGColNames #Load File #Temp self.TempData=pd.DataFrame() for", "of the Data (skip head) self.FBGSep=FBGSep # Data separador self.FBGColNumb=FBGColNumb self.FBGColNames=FBGColNames #Load File", "; 2015.07.16.12.31.01].txt'] FBGFileNum=3 FBGColNumb=5 FBGColNames=['Date','Time','Sample','FBG1','FBG2'] FBGSkipRows=2 FBGSep='\\t' test=FBG_Temp_Loading(TempPath,TempFileNum,TempSkipRows,TempSep,TempColNumb,TempColNames,FBGPaths,FBGFileNum,FBGSkipRows,FBGSep,FBGColNumb,FBGColNames) #test.FBGData['FBG1'].plot() test.Syncron() #To save test.FBGData.to_csv('C:\\\\Users\\\\gfpe\\\\Desktop\\\\FBG.csv',", "TempPath=['C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\sync\\\\temp.csv'] TempFileNum=1 TempColNumb=5 TempColNames=['Date','Time','Temp1','Temp2','Temp3'] TempSkipRows=1 TempSep=';' FBGPaths=['C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.09.31.02 ; 2015.07.16.10.31.01].txt','C:\\\\Users\\\\gfpe\\\\Desktop\\\\Example_Temp_File\\\\Sync\\\\BM Data [2015.07.16.10.31.02" ]
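# A minimal sketch (synthetic data, not the example files above) of how
# Syncron() aligns the two time-indexed frames: pd.concat with join='inner'
# keeps only the timestamps present in both FBGData and TempData.
"""
import pandas as pd

idx_fbg = pd.to_datetime(['2015-07-16 09:31:02', '2015-07-16 09:31:03', '2015-07-16 09:31:04'])
idx_temp = pd.to_datetime(['2015-07-16 09:31:03', '2015-07-16 09:31:04', '2015-07-16 09:31:05'])
fbg = pd.DataFrame({'FBG1': [1549.10, 1549.12, 1549.15]}, index=idx_fbg)
temp = pd.DataFrame({'Temp1': [20.1, 20.2, 20.3]}, index=idx_temp)

sync = pd.concat([fbg, temp], axis=1, join='inner')
# sync now holds only 09:31:03 and 09:31:04, each row carrying both FBG1 and Temp1
"""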
<reponame>girisandeep/Django-PiGPIO<filename>PiGPIO/migrations/0007_programlog.py
# Generated by Django 2.1.7 on 2019-03-25 23:17

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('PiGPIO', '0006_program_logging'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProgramLog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('info', models.CharField(default='', max_length=256, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('step', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PiGPIO.ProgramStep')),
            ],
        ),
    ]
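# For reference, a model declaration that would produce this migration might
# look like the sketch below (the app's actual models.py may differ in options
# and ordering; the `id` field is added implicitly by Django):
#
#     class ProgramLog(models.Model):
#         info = models.CharField(default='', max_length=256, null=True)
#         created_at = models.DateTimeField(auto_now_add=True)
#         step = models.ForeignKey('PiGPIO.ProgramStep', on_delete=models.CASCADE)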
<filename>tests/scripts/unicode💩.py<gh_stars>1000+
#!/env/bin/python
# -*- coding: utf-8 -*-
import time


def function1(seconds):
    time.sleep(seconds)


if __name__ == "__main__":
    function1(100)
[ "collect_handlers, handle fetcher = Fetcher(\"api/\") def create_display(name): disp = SseqDisplay(name) print(f\"Creating display at", "chart=None): self.name = name self.chart = None self.save_file_handle = FileHandle() self.autosave = False", "functions. \"\"\" # if not hasattr(self.chart, name): # raise AttributeError(f'Instance of {self.__class__.__name__} has", "async def update_a(self): await self.chart.update_a() async def send_batched_messages_a(self, messages): console.log(\"Sending batched messages:\", messages)", "communicate between the SseqChart and the browser. All of the data is contained", "of date. # So let's send an update to the existing charts first.", "the browser. All of the data is contained in the field SseqDisplay.chart which", "self._started = False self.executor.loop.call_soon(self.start_a()) def __repr__(self): if self._started: return f'{type(self).__name__}(name=\"{self.name}\", url=\"{self.url}\", chart={self.chart})' return", "JSON.stringify(dict(cmd=cmd, args=[], kwargs=kwargs)) async def send_message_a(self, cmd, **kwargs): message = SseqDisplay._create_message(cmd, **kwargs) for", "self.set_sseq(SseqChart.from_json(json_obj)) def set_sseq(self, chart): if self.chart is not None: self.chart._agent = None self.chart", "a variable and use it directly. \"\"\" # displays = {} def __init__(self,", "charts first. await self.update_a() self.subscribers[client_id] = port # \"initialize\" command sets chart range", "a superset of what \"reset\" does. port.postMessage(SseqDisplay._create_message(\"chart.state.initialize\", state = self.chart.to_json())) @handle(\"initialize.complete\") async def", "wrapped_fset = _wrap_chart_func(func.fset) if func.fdel: wrapped_fdel = _wrap_chart_func(func.fdel) wrapped = property(wrapped_fget, wrapped_fset, wrapped_fdel)", "is str: json_obj = json.loads(json_obj) self.set_sseq(SseqChart.from_json(json_obj)) def set_sseq(self, chart): if self.chart is not", "wrap def _bind_chart_attribute(name): func = getattr(SseqChart, name) func_type_name = type(func).__name__ if func_type_name ==", "def start_a(self): if self._started: return self._started = True response = await fetcher.put(f\"charts/{self.name}\", {})", "400: raise Exception(f\"Failed to create chart: {response.status_text}\") body = await response.json() print(f'Display started.", "name) func_type_name = type(func).__name__ if func_type_name == \"function\": wrapped = _wrap_chart_func(func) elif func_type_name", "func_type_name == \"property\": wrapped_fget = None wrapped_fset = None wrapped_fdel = None if", "return getattr(self.chart, name) def load_json(self, json_obj): if type(json_obj) is str: json_obj = json.loads(json_obj)", "port.postMessage(message) async def send_message_to_target_client_a(self, port, cmd, uuid, **kwargs): port.postMessage(JSON.stringify(dict( cmd=cmd, uuid=uuid, args=[], kwargs=kwargs", "chart range and page in addition to setting the chart. # \"initialize\" does", "property(wrapped_fget, wrapped_fset, wrapped_fdel) else: raise AssertionError() setattr(SseqDisplay, name, wrapped) # for a in", "url=\"{self.url}\", chart={self.chart})' return f\"\"\"{type(self).__name__}(name=\"{self.name}\", state=\"Not started, run 'await display.start_a()' to start.\")\"\"\" # def", "the field SseqDisplay.chart which is the SseqChart object that is being displayed. 
You", "# def __dir__(self): # \"\"\" getattr and dir have to be set up", "**kwargs )) @staticmethod def _create_message(cmd, **kwargs): return JSON.stringify(dict(cmd=cmd, args=[], kwargs=kwargs)) async def send_message_a(self,", "superset of what \"reset\" does. port.postMessage(SseqDisplay._create_message(\"chart.state.initialize\", state = self.chart.to_json())) @handle(\"initialize.complete\") async def initialize__complete__a(self,", "if path: working_directory = await get_working_directory_a() if not working_directory: raise RuntimeError(\"...\") self.save_file_handle =", "if func.fset: wrapped_fset = _wrap_chart_func(func.fset) if func.fdel: wrapped_fdel = _wrap_chart_func(func.fdel) wrapped = property(wrapped_fget,", "SseqChart from spectralsequence_chart.serialization import JSON from working_directory import get_working_directory_a, set_working_directory_a from functools import", "self.autosave and self.save_file_handle.is_open(): await self.save_a() async def save_a(self): await self.save_file_handle.ensure_open_a(modify=True) await self.save_file_handle.write_text_a(JSON.stringify(self.chart)) async", "not hasattr(self.chart, name): # raise AttributeError(f'Instance of {self.__class__.__name__} has no attribute {name}') #", "the existing charts first. await self.update_a() self.subscribers[client_id] = port # \"initialize\" command sets", "self.subscribers = {} SseqDisplay.displays[name] = self from repl.executor import Executor self.executor = Executor.executor", "self.chart is not None: self.chart._agent = None self.chart = chart self.chart._agent = self", "return JSON.stringify(dict(cmd=cmd, args=[], kwargs=kwargs)) async def send_message_a(self, cmd, **kwargs): message = SseqDisplay._create_message(cmd, **kwargs)", "make sure that we don't have other charts that are out of date.", "self.update_a() self.subscribers[client_id] = port # \"initialize\" command sets chart range and page in", "json_obj = json.loads(json_obj) self.set_sseq(SseqChart.from_json(json_obj)) def set_sseq(self, chart): if self.chart is not None: self.chart._agent", "__init__(self, name, chart=None): self.name = name self.chart = None self.save_file_handle = FileHandle() self.autosave", "be set up carefully to allow jedi to provide good docs for the", "self.chart._agent = self @property def url(self): directory = str(pathlib.Path(location.pathname).parent) return f\"{location.protocol}//{location.host}{directory}/charts/{self.name}\" async def", "sets chart range and page in addition to setting the chart. # \"initialize\"", "kwargs): kwargs = dict(kwargs) console.log(f\"SseqDisplay.handle_message({cmd}, {JSON.stringify(kwargs)})\") self.executor.loop.call_soon(self.message_handlers[cmd]( self, uuid=uuid, port=port, client_id=client_id, **kwargs ))", "this works only for properties... # if type(getattr(SseqChart, a)) is property: # _bind_chart_attribute(a)", "del obj[\"message\"] message.update(obj) chart_name = message[\"chart_name\"] del message[\"chart_name\"] display = SseqDisplay.displays[chart_name] display.handle_message(**message) def", "func.fget: wrapped_fget = _wrap_chart_func(func.fget) if func.fset: wrapped_fset = _wrap_chart_func(func.fset) if func.fdel: wrapped_fdel =", "wrapped_fset, wrapped_fdel) else: raise AssertionError() setattr(SseqDisplay, name, wrapped) # for a in dir(SseqChart):", "\"\"\" # result = self.chart.__dir__() # result.extend(self.__dict__.keys()) # return sorted(set(result)) # def __getattr__(self,", "A Spectral Sequence display. 
This contains the logic to communicate between the SseqChart", "SseqDisplay(name) await disp.load_a() return disp.chart @collect_handlers(\"message_handlers\") class SseqDisplay: \"\"\" A Spectral Sequence display.", "set up carefully to allow jedi to provide good docs for the SseqChart", "name): # raise AttributeError(f'Instance of {self.__class__.__name__} has no attribute {name}') # return getattr(self.chart,", "wrapped_fget = _wrap_chart_func(func.fget) if func.fset: wrapped_fset = _wrap_chart_func(func.fset) if func.fdel: wrapped_fdel = _wrap_chart_func(func.fdel)", "docs for the SseqChart functions. \"\"\" # result = self.chart.__dir__() # result.extend(self.__dict__.keys()) #", "to start.\")\"\"\" # def __dir__(self): # \"\"\" getattr and dir have to be", "jedi to provide good docs for the SseqChart functions. \"\"\" # result =", "store the chart into a variable and use it directly. \"\"\" # displays", "= chart or SseqChart(name) self.set_sseq(chart) self.subscribers = {} SseqDisplay.displays[name] = self from repl.executor", "disp.chart async def load_display_a(name): disp = SseqDisplay(name) await disp.load_a() return disp.chart @collect_handlers(\"message_handlers\") class", "console.log(\"Sending batched messages:\", messages) await self.send_message_a(\"chart.update\", messages = messages) await self.maybe_autosave_a() async def", "has no attribute {name}') # return getattr(self.chart, name) def load_json(self, json_obj): if type(json_obj)", "import get_working_directory_a, set_working_directory_a from functools import wraps from repl.handler_decorator import collect_handlers, handle fetcher", "self.send_message_a(\"chart.update\", messages = messages) await self.maybe_autosave_a() async def maybe_autosave_a(self): if self.autosave and self.save_file_handle.is_open():", "= json.loads(json_obj) self.set_sseq(SseqChart.from_json(json_obj)) def set_sseq(self, chart): if self.chart is not None: self.chart._agent =", "start_a(self): if self._started: return self._started = True response = await fetcher.put(f\"charts/{self.name}\", {}) if", "for properties. # # For properties, we copy a wrapper from SseqChart to", "\"\"\" # displays = {} def __init__(self, name, chart=None): self.name = name self.chart", "# So __dir__ / __getattr__ work only for methods and this works only", "are out of date. # So let's send an update to the existing", "in dir(SseqChart): # if a.startswith(\"_\") or a in dir(SseqDisplay): # continue # #", "for port in self.subscribers.values(): port.postMessage(message) async def send_message_to_target_client_a(self, port, cmd, uuid, **kwargs): port.postMessage(JSON.stringify(dict(", "wrapped = _wrap_chart_func(func) elif func_type_name == \"property\": wrapped_fget = None wrapped_fset = None", "get docs for properties. # # For properties, we copy a wrapper from", "from SseqChart to SseqDisplay. 
# # Note that if we do this for", "FileHandle import json import pathlib from spectralsequence_chart import SseqChart from spectralsequence_chart.serialization import JSON", "getattr(self.chart, name) def load_json(self, json_obj): if type(json_obj) is str: json_obj = json.loads(json_obj) self.set_sseq(SseqChart.from_json(json_obj))", "spectralsequence_chart import SseqChart from spectralsequence_chart.serialization import JSON from working_directory import get_working_directory_a, set_working_directory_a from", "import Fetcher from js_wrappers.filesystem import FileHandle import json import pathlib from spectralsequence_chart import", "= str(pathlib.Path(location.pathname).parent) return f\"{location.protocol}//{location.host}{directory}/charts/{self.name}\" async def start_a(self): if self._started: return self._started = True", "jedi get_signatures. # # So __dir__ / __getattr__ work only for methods and", "_wrap_chart_func(func): @wraps(func) def wrap(self, *args, **kwargs): return func(self.chart, *args, **kwargs) return wrap def", "set_sseq(self, chart): if self.chart is not None: self.chart._agent = None self.chart = chart", "All of the data is contained in the field SseqDisplay.chart which is the", "self.subscribers.values(): port.postMessage(message) async def send_message_to_target_client_a(self, port, cmd, uuid, **kwargs): port.postMessage(JSON.stringify(dict( cmd=cmd, uuid=uuid, args=[],", "path = None): if path: working_directory = await get_working_directory_a() if not working_directory: raise", "uuid, port, client_id): print(\"initialize.complete\") def _wrap_chart_func(func): @wraps(func) def wrap(self, *args, **kwargs): return func(self.chart,", "= await get_working_directory_a() if not working_directory: raise RuntimeError(\"...\") self.save_file_handle = await working_directory.path(path).resolve_file_handle_a(create=True) else:", "handle_message(self, cmd, args, port, client_id, uuid, kwargs): kwargs = dict(kwargs) console.log(f\"SseqDisplay.handle_message({cmd}, {JSON.stringify(kwargs)})\") self.executor.loop.call_soon(self.message_handlers[cmd](", "async def save_as_a(self, path = None): if path: working_directory = await get_working_directory_a() if", "\"\"\" A Spectral Sequence display. This contains the logic to communicate between the", "# For properties, we copy a wrapper from SseqChart to SseqDisplay. # #", "json_obj): if type(json_obj) is str: json_obj = json.loads(json_obj) self.set_sseq(SseqChart.from_json(json_obj)) def set_sseq(self, chart): if", "SseqChart and the browser. All of the data is contained in the field", "{JSON.stringify(kwargs)})\") self.executor.loop.call_soon(self.message_handlers[cmd]( self, uuid=uuid, port=port, client_id=client_id, **kwargs )) @staticmethod def _create_message(cmd, **kwargs): return", "console ) from js_wrappers.async_js import Fetcher from js_wrappers.filesystem import FileHandle import json import", "disp.chart @collect_handlers(\"message_handlers\") class SseqDisplay: \"\"\" A Spectral Sequence display. This contains the logic", "does a superset of what \"reset\" does. 
port.postMessage(SseqDisplay._create_message(\"chart.state.initialize\", state = self.chart.to_json())) @handle(\"initialize.complete\") async", "no attribute {name}') # return getattr(self.chart, name) def load_json(self, json_obj): if type(json_obj) is", "else: raise AssertionError() setattr(SseqDisplay, name, wrapped) # for a in dir(SseqChart): # if", "False chart = chart or SseqChart(name) self.set_sseq(chart) self.subscribers = {} SseqDisplay.displays[name] = self", "and dir have to be set up carefully to allow jedi to provide", "= await working_directory.path(path).resolve_file_handle_a() else: self.save_file_handle = FileHandle() await self.save_file_handle.open_a() self.set_sseq(JSON.parse(await self.save_file_handle.read_text_a())) await self.reset_state_a()", "or SseqChart(name) self.set_sseq(chart) self.subscribers = {} SseqDisplay.displays[name] = self from repl.executor import Executor", "get_signatures. # # So __dir__ / __getattr__ work only for methods and this", "from spectralsequence_chart import SseqChart from spectralsequence_chart.serialization import JSON from working_directory import get_working_directory_a, set_working_directory_a", "<reponame>JoeyBF/sseq from js import ( location, console ) from js_wrappers.async_js import Fetcher from", "response = await fetcher.put(f\"charts/{self.name}\", {}) if response.status >= 400: raise Exception(f\"Failed to create", "for the SseqChart functions. \"\"\" # result = self.chart.__dir__() # result.extend(self.__dict__.keys()) # return", "aren't enough to get docs for properties. # # For properties, we copy", "import FileHandle import json import pathlib from spectralsequence_chart import SseqChart from spectralsequence_chart.serialization import", "sure that we don't have other charts that are out of date. #", "logic to communicate between the SseqChart and the browser. All of the data", "cmd, **kwargs): message = SseqDisplay._create_message(cmd, **kwargs) for port in self.subscribers.values(): port.postMessage(message) async def", "return f'{type(self).__name__}(name=\"{self.name}\", url=\"{self.url}\", chart={self.chart})' return f\"\"\"{type(self).__name__}(name=\"{self.name}\", state=\"Not started, run 'await display.start_a()' to start.\")\"\"\"", "= _wrap_chart_func(func.fdel) wrapped = property(wrapped_fget, wrapped_fset, wrapped_fdel) else: raise AssertionError() setattr(SseqDisplay, name, wrapped)", "to the existing charts first. await self.update_a() self.subscribers[client_id] = port # \"initialize\" command", "messages = messages) await self.maybe_autosave_a() async def maybe_autosave_a(self): if self.autosave and self.save_file_handle.is_open(): await", "_wrap_chart_func(func.fdel) wrapped = property(wrapped_fget, wrapped_fset, wrapped_fdel) else: raise AssertionError() setattr(SseqDisplay, name, wrapped) #", "setting the chart. # \"initialize\" does a superset of what \"reset\" does. port.postMessage(SseqDisplay._create_message(\"chart.state.initialize\",", "that if we do this for methods too, it screws up jedi get_signatures.", "await response.json() print(f'Display started. Visit \"{self.url}\" to view.') async def reset_state_a(self): with self.chart._batched_messages_lock:", "_create_message(cmd, **kwargs): return JSON.stringify(dict(cmd=cmd, args=[], kwargs=kwargs)) async def send_message_a(self, cmd, **kwargs): message =", "Note that if we do this for methods too, it screws up jedi", "self.chart._agent = None self.chart = chart self.chart._agent = self @property def url(self): directory", "first. 
# ... import (location, console)   # the providing module's name is truncated in the source
from js_wrappers.async_js import Fetcher
from js_wrappers.filesystem import FileHandle
import json
import pathlib
from spectralsequence_chart import SseqChart
from spectralsequence_chart.serialization import JSON
from working_directory import get_working_directory_a, set_working_directory_a
from functools import wraps
from repl.handler_decorator import collect_handlers, handle

fetcher = Fetcher("api/")


def create_display(name):
    disp = SseqDisplay(name)
    print(f"Creating display at {disp.url}")
    return disp.chart


async def load_display_a(name):
    disp = SseqDisplay(name)
    await disp.load_a()
    return disp.chart
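# Usage sketch (illustrative; the chart name "example" and a REPL that
# supports top-level await are assumptions, not part of this module):
#
#   chart = create_display("example")        # start a display with a fresh chart
#   chart = await load_display_a("example")  # or restore one from a saved file
#
# Both helpers hand back the SseqChart rather than the SseqDisplay, so the
# caller manipulates the chart object directly.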
@collect_handlers("message_handlers")
class SseqDisplay:
    """A Spectral Sequence display.

    This contains the logic to communicate between the SseqChart and the
    browser. All of the data is contained in the field SseqDisplay.chart,
    which is the SseqChart object that is being displayed. You may want to
    store the chart into a variable and use it directly.
    """
    displays = {}

    def __init__(self, name, chart=None):
        self.name = name
        self.chart = None
        self.save_file_handle = FileHandle()
        self.autosave = False
        chart = chart or SseqChart(name)
        self.set_sseq(chart)
        self.subscribers = {}
        SseqDisplay.displays[name] = self
        from repl.executor import Executor
        self.executor = Executor.executor
        self._started = False
        self.executor.loop.call_soon(self.start_a())

    def __repr__(self):
        if self._started:
            return f'{type(self).__name__}(name="{self.name}", url="{self.url}", chart={self.chart})'
        return f"""{type(self).__name__}(name="{self.name}", state="Not started, run 'await display.start_a()' to start.")"""

    # def __dir__(self):
    #     """ getattr and dir have to be set up carefully to allow jedi to
    #     provide good docs for the SseqChart functions. """
    #     result = self.chart.__dir__()
    #     result.extend(self.__dict__.keys())
    #     return sorted(set(result))

    # def __getattr__(self, name):
    #     """ getattr and dir have to be set up carefully to allow jedi to
    #     provide good docs for the SseqChart functions. """
    #     if not hasattr(self.chart, name):
    #         raise AttributeError(f'Instance of {self.__class__.__name__} has no attribute {name}')
    #     return getattr(self.chart, name)

    def load_json(self, json_obj):
        if type(json_obj) is str:
            json_obj = json.loads(json_obj)
        self.set_sseq(SseqChart.from_json(json_obj))

    def set_sseq(self, chart):
        if self.chart is not None:
            self.chart._agent = None
        self.chart = chart
        self.chart._agent = self

    @property
    def url(self):
        directory = str(pathlib.Path(location.pathname).parent)
        return f"{location.protocol}//{location.host}{directory}/charts/{self.name}"
Visit \"{self.url}\" to view.') async def reset_state_a(self): with self.chart._batched_messages_lock: self.chart._clear_batched_messages()", "# for a in dir(SseqChart): # if a.startswith(\"_\") or a in dir(SseqDisplay): #", "to be set up carefully to allow jedi to provide good docs for", "self.chart.__dir__() # result.extend(self.__dict__.keys()) # return sorted(set(result)) # def __getattr__(self, name): # \"\"\" getattr", "update_a(self): await self.chart.update_a() async def send_batched_messages_a(self, messages): console.log(\"Sending batched messages:\", messages) await self.send_message_a(\"chart.update\",", "# \"\"\" getattr and dir have to be set up carefully to allow", "self.chart = None self.save_file_handle = FileHandle() self.autosave = False chart = chart or", "client_id): print(\"Handling new user...\") # Might as well make sure that we don't", "= SseqDisplay._create_message(cmd, **kwargs) for port in self.subscribers.values(): port.postMessage(message) async def send_message_to_target_client_a(self, port, cmd,", "return disp.chart async def load_display_a(name): disp = SseqDisplay(name) await disp.load_a() return disp.chart @collect_handlers(\"message_handlers\")", "= None if func.fget: wrapped_fget = _wrap_chart_func(func.fget) if func.fset: wrapped_fset = _wrap_chart_func(func.fset) if", "def reset_state_a(self): with self.chart._batched_messages_lock: self.chart._clear_batched_messages() await self.send_message_a(\"chart.state.reset\", state = self.chart.to_json()) await self.maybe_autosave_a() def", "kwargs = dict(kwargs) console.log(f\"SseqDisplay.handle_message({cmd}, {JSON.stringify(kwargs)})\") self.executor.loop.call_soon(self.message_handlers[cmd]( self, uuid=uuid, port=port, client_id=client_id, **kwargs )) @staticmethod", "def __getattr__(self, name): # \"\"\" getattr and dir have to be set up", "@staticmethod def _create_message(cmd, **kwargs): return JSON.stringify(dict(cmd=cmd, args=[], kwargs=kwargs)) async def send_message_a(self, cmd, **kwargs):", "display at {disp.url}\") return disp.chart async def load_display_a(name): disp = SseqDisplay(name) await disp.load_a()", "async def load_display_a(name): disp = SseqDisplay(name) await disp.load_a() return disp.chart @collect_handlers(\"message_handlers\") class SseqDisplay:", "of {self.__class__.__name__} has no attribute {name}') # return getattr(self.chart, name) def load_json(self, json_obj):", "name self.chart = None self.save_file_handle = FileHandle() self.autosave = False chart = chart", "it directly. \"\"\" # displays = {} def __init__(self, name, chart=None): self.name =", "= chart self.chart._agent = self @property def url(self): directory = str(pathlib.Path(location.pathname).parent) return f\"{location.protocol}//{location.host}{directory}/charts/{self.name}\"", "def _wrap_chart_func(func): @wraps(func) def wrap(self, *args, **kwargs): return func(self.chart, *args, **kwargs) return wrap", "SseqChart(name) self.set_sseq(chart) self.subscribers = {} SseqDisplay.displays[name] = self from repl.executor import Executor self.executor", "that are out of date. 
# So let's send an update to the", "the data is contained in the field SseqDisplay.chart which is the SseqChart object", "None if func.fget: wrapped_fget = _wrap_chart_func(func.fget) if func.fset: wrapped_fset = _wrap_chart_func(func.fset) if func.fdel:", "initialize__complete__a(self, uuid, port, client_id): print(\"initialize.complete\") def _wrap_chart_func(func): @wraps(func) def wrap(self, *args, **kwargs): return", "None: self.chart._agent = None self.chart = chart self.chart._agent = self @property def url(self):", "__getattr__ work only for methods and this works only for properties... # if", "@staticmethod def dispatch_message(obj): message = json.loads(obj[\"message\"]) del obj[\"message\"] message.update(obj) chart_name = message[\"chart_name\"] del", "a in dir(SseqDisplay): # continue # # The __getattr__ and __dir__ methods above", "= _wrap_chart_func(func.fset) if func.fdel: wrapped_fdel = _wrap_chart_func(func.fdel) wrapped = property(wrapped_fget, wrapped_fset, wrapped_fdel) else:", "chart. # \"initialize\" does a superset of what \"reset\" does. port.postMessage(SseqDisplay._create_message(\"chart.state.initialize\", state =", "properties, we copy a wrapper from SseqChart to SseqDisplay. # # Note that", "only for methods and this works only for properties... # if type(getattr(SseqChart, a))", "await disp.load_a() return disp.chart @collect_handlers(\"message_handlers\") class SseqDisplay: \"\"\" A Spectral Sequence display. This", "started, run 'await display.start_a()' to start.\")\"\"\" # def __dir__(self): # \"\"\" getattr and", "range and page in addition to setting the chart. # \"initialize\" does a", "**kwargs) return wrap def _bind_chart_attribute(name): func = getattr(SseqChart, name) func_type_name = type(func).__name__ if", "import pathlib from spectralsequence_chart import SseqChart from spectralsequence_chart.serialization import JSON from working_directory import", "in self.subscribers.values(): port.postMessage(message) async def send_message_to_target_client_a(self, port, cmd, uuid, **kwargs): port.postMessage(JSON.stringify(dict( cmd=cmd, uuid=uuid,", "False self.executor.loop.call_soon(self.start_a()) def __repr__(self): if self._started: return f'{type(self).__name__}(name=\"{self.name}\", url=\"{self.url}\", chart={self.chart})' return f\"\"\"{type(self).__name__}(name=\"{self.name}\", state=\"Not", "self.save_file_handle = await working_directory.path(path).resolve_file_handle_a() else: self.save_file_handle = FileHandle() await self.save_file_handle.open_a() self.set_sseq(JSON.parse(await self.save_file_handle.read_text_a())) await", "existing charts first. await self.update_a() self.subscribers[client_id] = port # \"initialize\" command sets chart", "port, client_id): print(\"initialize.complete\") def _wrap_chart_func(func): @wraps(func) def wrap(self, *args, **kwargs): return func(self.chart, *args,", "self.chart.to_json()) await self.maybe_autosave_a() def update(self): self.executor.loop.call_soon(self.update_a()) async def update_a(self): await self.chart.update_a() async def", "str: json_obj = json.loads(json_obj) self.set_sseq(SseqChart.from_json(json_obj)) def set_sseq(self, chart): if self.chart is not None:", "properties. # # For properties, we copy a wrapper from SseqChart to SseqDisplay.", "at {disp.url}\") return disp.chart async def load_display_a(name): disp = SseqDisplay(name) await disp.load_a() return", "it screws up jedi get_signatures. 
# # So __dir__ / __getattr__ work only", "args=[], kwargs=kwargs ))) @handle(\"new_user\") async def new_user__a(self, uuid, port, client_id): print(\"Handling new user...\")", "SseqChart to SseqDisplay. # # Note that if we do this for methods", "SseqDisplay.displays[name] = self from repl.executor import Executor self.executor = Executor.executor self._started = False", "def load_a(self, path = None): if path: working_directory = await get_working_directory_a() if not", "from js_wrappers.filesystem import FileHandle import json import pathlib from spectralsequence_chart import SseqChart from", "= port # \"initialize\" command sets chart range and page in addition to", "json.loads(json_obj) self.set_sseq(SseqChart.from_json(json_obj)) def set_sseq(self, chart): if self.chart is not None: self.chart._agent = None", ">= 400: raise Exception(f\"Failed to create chart: {response.status_text}\") body = await response.json() print(f'Display", "client_id=client_id, **kwargs )) @staticmethod def _create_message(cmd, **kwargs): return JSON.stringify(dict(cmd=cmd, args=[], kwargs=kwargs)) async def", "await self.save_file_handle.open_a() self.set_sseq(JSON.parse(await self.save_file_handle.read_text_a())) await self.reset_state_a() @staticmethod def dispatch_message(obj): message = json.loads(obj[\"message\"]) del", "set_working_directory_a from functools import wraps from repl.handler_decorator import collect_handlers, handle fetcher = Fetcher(\"api/\")", "name): # \"\"\" getattr and dir have to be set up carefully to", "up jedi get_signatures. # # So __dir__ / __getattr__ work only for methods", "__getattr__(self, name): # \"\"\" getattr and dir have to be set up carefully", "js_wrappers.filesystem import FileHandle import json import pathlib from spectralsequence_chart import SseqChart from spectralsequence_chart.serialization", "port in self.subscribers.values(): port.postMessage(message) async def send_message_to_target_client_a(self, port, cmd, uuid, **kwargs): port.postMessage(JSON.stringify(dict( cmd=cmd,", "date. # So let's send an update to the existing charts first. await", "client_id): print(\"initialize.complete\") def _wrap_chart_func(func): @wraps(func) def wrap(self, *args, **kwargs): return func(self.chart, *args, **kwargs)", "# return getattr(self.chart, name) def load_json(self, json_obj): if type(json_obj) is str: json_obj =", "async def send_message_a(self, cmd, **kwargs): message = SseqDisplay._create_message(cmd, **kwargs) for port in self.subscribers.values():", "= SseqDisplay.displays[chart_name] display.handle_message(**message) def handle_message(self, cmd, args, port, client_id, uuid, kwargs): kwargs =", "if self.chart is not None: self.chart._agent = None self.chart = chart self.chart._agent =", "up carefully to allow jedi to provide good docs for the SseqChart functions.", "elif func_type_name == \"property\": wrapped_fget = None wrapped_fset = None wrapped_fdel = None", "load_a(self, path = None): if path: working_directory = await get_working_directory_a() if not working_directory:", "self._started: return f'{type(self).__name__}(name=\"{self.name}\", url=\"{self.url}\", chart={self.chart})' return f\"\"\"{type(self).__name__}(name=\"{self.name}\", state=\"Not started, run 'await display.start_a()' to", "provide good docs for the SseqChart functions. 
\"\"\" # result = self.chart.__dir__() #", "json import pathlib from spectralsequence_chart import SseqChart from spectralsequence_chart.serialization import JSON from working_directory", "async def send_batched_messages_a(self, messages): console.log(\"Sending batched messages:\", messages) await self.send_message_a(\"chart.update\", messages = messages)", "as well make sure that we don't have other charts that are out", "carefully to allow jedi to provide good docs for the SseqChart functions. \"\"\"", "def _bind_chart_attribute(name): func = getattr(SseqChart, name) func_type_name = type(func).__name__ if func_type_name == \"function\":", "to get docs for properties. # # For properties, we copy a wrapper", "message.update(obj) chart_name = message[\"chart_name\"] del message[\"chart_name\"] display = SseqDisplay.displays[chart_name] display.handle_message(**message) def handle_message(self, cmd,", "from js_wrappers.async_js import Fetcher from js_wrappers.filesystem import FileHandle import json import pathlib from", "self._started: return self._started = True response = await fetcher.put(f\"charts/{self.name}\", {}) if response.status >=", "del message[\"chart_name\"] display = SseqDisplay.displays[chart_name] display.handle_message(**message) def handle_message(self, cmd, args, port, client_id, uuid,", "what \"reset\" does. port.postMessage(SseqDisplay._create_message(\"chart.state.initialize\", state = self.chart.to_json())) @handle(\"initialize.complete\") async def initialize__complete__a(self, uuid, port,", "await self.chart.update_a() async def send_batched_messages_a(self, messages): console.log(\"Sending batched messages:\", messages) await self.send_message_a(\"chart.update\", messages", "{} def __init__(self, name, chart=None): self.name = name self.chart = None self.save_file_handle =", "self.maybe_autosave_a() async def maybe_autosave_a(self): if self.autosave and self.save_file_handle.is_open(): await self.save_a() async def save_a(self):", "async def save_a(self): await self.save_file_handle.ensure_open_a(modify=True) await self.save_file_handle.write_text_a(JSON.stringify(self.chart)) async def save_as_a(self, path = None):", "have other charts that are out of date. # So let's send an", "= FileHandle() await self.save_a() async def load_a(self, path = None): if path: working_directory", "def handle_message(self, cmd, args, port, client_id, uuid, kwargs): kwargs = dict(kwargs) console.log(f\"SseqDisplay.handle_message({cmd}, {JSON.stringify(kwargs)})\")", "between the SseqChart and the browser. All of the data is contained in", "def new_user__a(self, uuid, port, client_id): print(\"Handling new user...\") # Might as well make", "async def maybe_autosave_a(self): if self.autosave and self.save_file_handle.is_open(): await self.save_a() async def save_a(self): await", "\"\"\" # if not hasattr(self.chart, name): # raise AttributeError(f'Instance of {self.__class__.__name__} has no", "view.') async def reset_state_a(self): with self.chart._batched_messages_lock: self.chart._clear_batched_messages() await self.send_message_a(\"chart.state.reset\", state = self.chart.to_json()) await", "path: working_directory = await get_working_directory_a() if not working_directory: raise RuntimeError(\"...\") self.save_file_handle = await", "page in addition to setting the chart. 
# \"initialize\" does a superset of", "@handle(\"initialize.complete\") async def initialize__complete__a(self, uuid, port, client_id): print(\"initialize.complete\") def _wrap_chart_func(func): @wraps(func) def wrap(self,", "None wrapped_fset = None wrapped_fdel = None if func.fget: wrapped_fget = _wrap_chart_func(func.fget) if", "FileHandle() self.autosave = False chart = chart or SseqChart(name) self.set_sseq(chart) self.subscribers = {}", "@property def url(self): directory = str(pathlib.Path(location.pathname).parent) return f\"{location.protocol}//{location.host}{directory}/charts/{self.name}\" async def start_a(self): if self._started:", "self.executor.loop.call_soon(self.start_a()) def __repr__(self): if self._started: return f'{type(self).__name__}(name=\"{self.name}\", url=\"{self.url}\", chart={self.chart})' return f\"\"\"{type(self).__name__}(name=\"{self.name}\", state=\"Not started,", "self.save_file_handle = await working_directory.path(path).resolve_file_handle_a(create=True) else: self.save_file_handle = FileHandle() await self.save_a() async def load_a(self,", "RuntimeError(\"...\") self.save_file_handle = await working_directory.path(path).resolve_file_handle_a(create=True) else: self.save_file_handle = FileHandle() await self.save_a() async def", "SseqDisplay.displays[chart_name] display.handle_message(**message) def handle_message(self, cmd, args, port, client_id, uuid, kwargs): kwargs = dict(kwargs)", "None wrapped_fdel = None if func.fget: wrapped_fget = _wrap_chart_func(func.fget) if func.fset: wrapped_fset =", "AttributeError(f'Instance of {self.__class__.__name__} has no attribute {name}') # return getattr(self.chart, name) def load_json(self,", "{}) if response.status >= 400: raise Exception(f\"Failed to create chart: {response.status_text}\") body =", "await self.maybe_autosave_a() def update(self): self.executor.loop.call_soon(self.update_a()) async def update_a(self): await self.chart.update_a() async def send_batched_messages_a(self,", "uuid, kwargs): kwargs = dict(kwargs) console.log(f\"SseqDisplay.handle_message({cmd}, {JSON.stringify(kwargs)})\") self.executor.loop.call_soon(self.message_handlers[cmd]( self, uuid=uuid, port=port, client_id=client_id, **kwargs", "self._started = True response = await fetcher.put(f\"charts/{self.name}\", {}) if response.status >= 400: raise", "name) def load_json(self, json_obj): if type(json_obj) is str: json_obj = json.loads(json_obj) self.set_sseq(SseqChart.from_json(json_obj)) def", "working_directory.path(path).resolve_file_handle_a(create=True) else: self.save_file_handle = FileHandle() await self.save_a() async def load_a(self, path = None):", "= messages) await self.maybe_autosave_a() async def maybe_autosave_a(self): if self.autosave and self.save_file_handle.is_open(): await self.save_a()", "get_working_directory_a, set_working_directory_a from functools import wraps from repl.handler_decorator import collect_handlers, handle fetcher =", "an update to the existing charts first. await self.update_a() self.subscribers[client_id] = port #", "= self @property def url(self): directory = str(pathlib.Path(location.pathname).parent) return f\"{location.protocol}//{location.host}{directory}/charts/{self.name}\" async def start_a(self):", "_wrap_chart_func(func.fset) if func.fdel: wrapped_fdel = _wrap_chart_func(func.fdel) wrapped = property(wrapped_fget, wrapped_fset, wrapped_fdel) else: raise", "\"reset\" does. 
port.postMessage(SseqDisplay._create_message(\"chart.state.initialize\", state = self.chart.to_json())) @handle(\"initialize.complete\") async def initialize__complete__a(self, uuid, port, client_id):", "result.extend(self.__dict__.keys()) # return sorted(set(result)) # def __getattr__(self, name): # \"\"\" getattr and dir", "_bind_chart_attribute(name): func = getattr(SseqChart, name) func_type_name = type(func).__name__ if func_type_name == \"function\": wrapped", "repl.handler_decorator import collect_handlers, handle fetcher = Fetcher(\"api/\") def create_display(name): disp = SseqDisplay(name) print(f\"Creating", "def url(self): directory = str(pathlib.Path(location.pathname).parent) return f\"{location.protocol}//{location.host}{directory}/charts/{self.name}\" async def start_a(self): if self._started: return", "from spectralsequence_chart.serialization import JSON from working_directory import get_working_directory_a, set_working_directory_a from functools import wraps", "= FileHandle() await self.save_file_handle.open_a() self.set_sseq(JSON.parse(await self.save_file_handle.read_text_a())) await self.reset_state_a() @staticmethod def dispatch_message(obj): message =", "display.start_a()' to start.\")\"\"\" # def __dir__(self): # \"\"\" getattr and dir have to", "f'{type(self).__name__}(name=\"{self.name}\", url=\"{self.url}\", chart={self.chart})' return f\"\"\"{type(self).__name__}(name=\"{self.name}\", state=\"Not started, run 'await display.start_a()' to start.\")\"\"\" #", "the chart. # \"initialize\" does a superset of what \"reset\" does. port.postMessage(SseqDisplay._create_message(\"chart.state.initialize\", state", "**kwargs): message = SseqDisplay._create_message(cmd, **kwargs) for port in self.subscribers.values(): port.postMessage(message) async def send_message_to_target_client_a(self,", "if self._started: return self._started = True response = await fetcher.put(f\"charts/{self.name}\", {}) if response.status", "( location, console ) from js_wrappers.async_js import Fetcher from js_wrappers.filesystem import FileHandle import", "directly. \"\"\" # displays = {} def __init__(self, name, chart=None): self.name = name", "obj[\"message\"] message.update(obj) chart_name = message[\"chart_name\"] del message[\"chart_name\"] display = SseqDisplay.displays[chart_name] display.handle_message(**message) def handle_message(self,", "func = getattr(SseqChart, name) func_type_name = type(func).__name__ if func_type_name == \"function\": wrapped =", "above aren't enough to get docs for properties. # # For properties, we", "{name}') # return getattr(self.chart, name) def load_json(self, json_obj): if type(json_obj) is str: json_obj", "@wraps(func) def wrap(self, *args, **kwargs): return func(self.chart, *args, **kwargs) return wrap def _bind_chart_attribute(name):", "that is being displayed. 
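    # Save/load sketch (the path "charts/example.json" and a REPL that allows
    # top-level await are assumptions):
    #
    #   await display.save_as_a("charts/example.json")  # bind a file handle
    #   display.autosave = True   # maybe_autosave_a now persists each update
    #   await display.load_a("charts/example.json")     # restore, rebroadcast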
    @staticmethod
    def dispatch_message(obj):
        message = json.loads(obj["message"])
        del obj["message"]
        message.update(obj)
        chart_name = message["chart_name"]
        del message["chart_name"]
        display = SseqDisplay.displays[chart_name]
        display.handle_message(**message)

    def handle_message(self, cmd, args, port, client_id, uuid, kwargs):
        kwargs = dict(kwargs)
        console.log(f"SseqDisplay.handle_message({cmd}, {JSON.stringify(kwargs)})")
        self.executor.loop.call_soon(self.message_handlers[cmd](
            self, uuid=uuid, port=port, client_id=client_id, **kwargs
        ))

    @staticmethod
    def _create_message(cmd, **kwargs):
        return JSON.stringify(dict(cmd=cmd, args=[], kwargs=kwargs))

    async def send_message_a(self, cmd, **kwargs):
        message = SseqDisplay._create_message(cmd, **kwargs)
        for port in self.subscribers.values():
            port.postMessage(message)

    async def send_message_to_target_client_a(self, port, cmd, uuid, **kwargs):
        port.postMessage(JSON.stringify(dict(
            cmd=cmd, uuid=uuid, args=[], kwargs=kwargs
        )))

    @handle("new_user")
    async def new_user__a(self, uuid, port, client_id):
        print("Handling new user...")
        # Might as well make sure that we don't have other charts that are
        # out of date. So let's send an update to the existing charts first.
        await self.update_a()
        self.subscribers[client_id] = port
        # "initialize" command sets chart range and page in addition to
        # setting the chart. "initialize" does a superset of what "reset" does.
        port.postMessage(SseqDisplay._create_message("chart.state.initialize", state=self.chart.to_json()))

    @handle("initialize.complete")
    async def initialize__complete__a(self, uuid, port, client_id):
        print("initialize.complete")
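    # Wire-format sketch: _create_message serializes every outgoing message
    # into a JSON envelope like the one below (field values assumed for
    # illustration):
    #
    #   {"cmd": "chart.update", "args": [], "kwargs": {"messages": [...]}}
    #
    # Inbound traffic runs the reverse path: dispatch_message unwraps the
    # envelope, routes on "chart_name" via SseqDisplay.displays, and
    # handle_message schedules the @handle-registered coroutine for "cmd".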
\"\"\" # result = self.chart.__dir__() # result.extend(self.__dict__.keys()) # return sorted(set(result)) #", "str(pathlib.Path(location.pathname).parent) return f\"{location.protocol}//{location.host}{directory}/charts/{self.name}\" async def start_a(self): if self._started: return self._started = True response", "getattr and dir have to be set up carefully to allow jedi to", "self.send_message_a(\"chart.state.reset\", state = self.chart.to_json()) await self.maybe_autosave_a() def update(self): self.executor.loop.call_soon(self.update_a()) async def update_a(self): await", "self.set_sseq(JSON.parse(await self.save_file_handle.read_text_a())) await self.reset_state_a() @staticmethod def dispatch_message(obj): message = json.loads(obj[\"message\"]) del obj[\"message\"] message.update(obj)", "You may want to store the chart into a variable and use it", "working_directory: raise RuntimeError(\"...\") self.save_file_handle = await working_directory.path(path).resolve_file_handle_a(create=True) else: self.save_file_handle = FileHandle() await self.save_a()", "work only for methods and this works only for properties... # if type(getattr(SseqChart,", "= SseqDisplay(name) await disp.load_a() return disp.chart @collect_handlers(\"message_handlers\") class SseqDisplay: \"\"\" A Spectral Sequence", "Spectral Sequence display. This contains the logic to communicate between the SseqChart and", "# # Note that if we do this for methods too, it screws", "import JSON from working_directory import get_working_directory_a, set_working_directory_a from functools import wraps from repl.handler_decorator", "None): if path: working_directory = await get_working_directory_a() if not working_directory: raise RuntimeError(\"...\") self.save_file_handle", "don't have other charts that are out of date. # So let's send", "a.startswith(\"_\") or a in dir(SseqDisplay): # continue # # The __getattr__ and __dir__", "message = SseqDisplay._create_message(cmd, **kwargs) for port in self.subscribers.values(): port.postMessage(message) async def send_message_to_target_client_a(self, port,", "docs for properties. # # For properties, we copy a wrapper from SseqChart", "sorted(set(result)) # def __getattr__(self, name): # \"\"\" getattr and dir have to be", "await working_directory.path(path).resolve_file_handle_a(create=True) else: self.save_file_handle = FileHandle() await self.save_a() async def load_a(self, path =", "print(\"initialize.complete\") def _wrap_chart_func(func): @wraps(func) def wrap(self, *args, **kwargs): return func(self.chart, *args, **kwargs) return", "def set_sseq(self, chart): if self.chart is not None: self.chart._agent = None self.chart =", "wrapped_fdel = _wrap_chart_func(func.fdel) wrapped = property(wrapped_fget, wrapped_fset, wrapped_fdel) else: raise AssertionError() setattr(SseqDisplay, name,", "None self.chart = chart self.chart._agent = self @property def url(self): directory = str(pathlib.Path(location.pathname).parent)", "This contains the logic to communicate between the SseqChart and the browser. All", "send_batched_messages_a(self, messages): console.log(\"Sending batched messages:\", messages) await self.send_message_a(\"chart.update\", messages = messages) await self.maybe_autosave_a()", "display = SseqDisplay.displays[chart_name] display.handle_message(**message) def handle_message(self, cmd, args, port, client_id, uuid, kwargs): kwargs", "which is the SseqChart object that is being displayed. 
from js import (
    location,
    console
)
from js_wrappers.async_js import Fetcher
from js_wrappers.filesystem import FileHandle

import json
import pathlib

from spectralsequence_chart import SseqChart
from spectralsequence_chart.serialization import JSON

from working_directory import get_working_directory_a, set_working_directory_a
from functools import wraps
from repl.handler_decorator import collect_handlers, handle

fetcher = Fetcher("api/")

def create_display(name):
    disp = SseqDisplay(name)
    print(f"Creating display at {disp.url}")
    return disp.chart

async def load_display_a(name):
    disp = SseqDisplay(name)
    await disp.load_a()
    return disp.chart

@collect_handlers("message_handlers")
class SseqDisplay:
    """ A Spectral Sequence display. This contains the logic to communicate
        between the SseqChart and the browser. All of the data is contained in
        the field SseqDisplay.chart which is the SseqChart object that is being
        displayed. You may want to store the chart into a variable and use it
        directly.
    """
    displays = {}

    def __init__(self, name, chart=None):
        self.name = name
        self.chart = None
        self.save_file_handle = FileHandle()
        self.autosave = False
        chart = chart or SseqChart(name)
        self.set_sseq(chart)
        self.subscribers = {}
        SseqDisplay.displays[name] = self
        from repl.executor import Executor
        self.executor = Executor.executor
        self._started = False
        self.executor.loop.call_soon(self.start_a())

    def __repr__(self):
        if self._started:
            return f'{type(self).__name__}(name="{self.name}", url="{self.url}", chart={self.chart})'
        return f"""{type(self).__name__}(name="{self.name}", state="Not started, run 'await display.start_a()' to start.")"""

    # def __dir__(self):
    #     """ getattr and dir have to be set up carefully to allow jedi to
    #         provide good docs for the SseqChart functions.
    #     """
    #     result = self.chart.__dir__()
    #     result.extend(self.__dict__.keys())
    #     return sorted(set(result))

    # def __getattr__(self, name):
    #     """ getattr and dir have to be set up carefully to allow jedi to
    #         provide good docs for the SseqChart functions.
    #     """
    #     if not hasattr(self.chart, name):
    #         raise AttributeError(f'Instance of {self.__class__.__name__} has no attribute {name}')
    #     return getattr(self.chart, name)

    def load_json(self, json_obj):
        if type(json_obj) is str:
            json_obj = json.loads(json_obj)
        self.set_sseq(SseqChart.from_json(json_obj))

    def set_sseq(self, chart):
        if self.chart is not None:
            self.chart._agent = None
        self.chart = chart
        self.chart._agent = self

    @property
    def url(self):
        directory = str(pathlib.Path(location.pathname).parent)
        return f"{location.protocol}//{location.host}{directory}/charts/{self.name}"

    async def start_a(self):
        if self._started:
            return
        self._started = True
        response = await fetcher.put(f"charts/{self.name}", {})
        if response.status >= 400:
            raise Exception(f"Failed to create chart: {response.status_text}")
        body = await response.json()
        print(f'Display started. Visit "{self.url}" to view.')

    async def reset_state_a(self):
        with self.chart._batched_messages_lock:
            self.chart._clear_batched_messages()
        await self.send_message_a("chart.state.reset", state = self.chart.to_json())
        await self.maybe_autosave_a()

    def update(self):
        self.executor.loop.call_soon(self.update_a())

    async def update_a(self):
        await self.chart.update_a()

    async def send_batched_messages_a(self, messages):
        console.log("Sending batched messages:", messages)
        await self.send_message_a("chart.update", messages = messages)
        await self.maybe_autosave_a()

    async def maybe_autosave_a(self):
        if self.autosave and self.save_file_handle.is_open():
            await self.save_a()

    async def save_a(self):
        await self.save_file_handle.ensure_open_a(modify=True)
        await self.save_file_handle.write_text_a(JSON.stringify(self.chart))

    async def save_as_a(self, path = None):
        if path:
            working_directory = await get_working_directory_a()
            if not working_directory:
                raise RuntimeError("...")
            self.save_file_handle = await working_directory.path(path).resolve_file_handle_a(create=True)
        else:
            self.save_file_handle = FileHandle()
        await self.save_a()

    async def load_a(self, path = None):
        if path:
            working_directory = await get_working_directory_a()
            if not working_directory:
                raise RuntimeError("...")
            self.save_file_handle = await working_directory.path(path).resolve_file_handle_a()
        else:
            self.save_file_handle = FileHandle()
            await self.save_file_handle.open_a()
        self.set_sseq(JSON.parse(await self.save_file_handle.read_text_a()))
        await self.reset_state_a()

    @staticmethod
    def dispatch_message(obj):
        message = json.loads(obj["message"])
        del obj["message"]
        message.update(obj)
        chart_name = message["chart_name"]
        del message["chart_name"]
        display = SseqDisplay.displays[chart_name]
        display.handle_message(**message)

    def handle_message(self, cmd, args, port, client_id, uuid, kwargs):
        kwargs = dict(kwargs)
        console.log(f"SseqDisplay.handle_message({cmd}, {JSON.stringify(kwargs)})")
        self.executor.loop.call_soon(self.message_handlers[cmd](
            self, uuid=uuid, port=port, client_id=client_id, **kwargs
        ))

    @staticmethod
    def _create_message(cmd, **kwargs):
        return JSON.stringify(dict(cmd=cmd, args=[], kwargs=kwargs))

    async def send_message_a(self, cmd, **kwargs):
        message = SseqDisplay._create_message(cmd, **kwargs)
        for port in self.subscribers.values():
            port.postMessage(message)

    async def send_message_to_target_client_a(self, port, cmd, uuid, **kwargs):
        port.postMessage(JSON.stringify(dict(
            cmd=cmd, uuid=uuid, args=[], kwargs=kwargs
        )))

    @handle("new_user")
    async def new_user__a(self, uuid, port, client_id):
        print("Handling new user...")
        # Might as well make sure that we don't have other charts that are out of date.
        # So let's send an update to the existing charts first.
        await self.update_a()
        self.subscribers[client_id] = port
        # "initialize" does a superset of what "reset" does.
        port.postMessage(SseqDisplay._create_message("chart.state.initialize", state = self.chart.to_json()))

    @handle("initialize.complete")
    async def initialize__complete__a(self, uuid, port, client_id):
        print("initialize.complete")


def _wrap_chart_func(func):
    @wraps(func)
    def wrap(self, *args, **kwargs):
        return func(self.chart, *args, **kwargs)
    return wrap

def _bind_chart_attribute(name):
    func = getattr(SseqChart, name)
    func_type_name = type(func).__name__
    if func_type_name == "function":
        wrapped = _wrap_chart_func(func)
    elif func_type_name == "property":
        wrapped_fget = None
        wrapped_fset = None
        wrapped_fdel = None
        if func.fget:
            wrapped_fget = _wrap_chart_func(func.fget)
        if func.fset:
            wrapped_fset = _wrap_chart_func(func.fset)
        if func.fdel:
            wrapped_fdel = _wrap_chart_func(func.fdel)
        wrapped = property(wrapped_fget, wrapped_fset, wrapped_fdel)
    else:
        raise AssertionError()
    setattr(SseqDisplay, name, wrapped)

# for a in dir(SseqChart):
#     if a.startswith("_") or a in dir(SseqDisplay):
#         continue
#     # The __getattr__ and __dir__ methods above aren't enough to get docs for properties.
#     # For properties, we copy a wrapped version of the property from SseqChart to SseqDisplay.
#     # Note that if we do this for methods too, it screws up jedi get_signatures.
#     # So __dir__ / __getattr__ work only for methods and this works only for properties...
#     if type(getattr(SseqChart, a)) is property:
#         _bind_chart_attribute(a)
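# A minimal, self-contained sketch of the property-forwarding trick used by
# _bind_chart_attribute above. The names _Inner, _Outer, and _forward_property
# are hypothetical stand-ins, not part of the original module: they show how
# wrapping a property's fget/fset with functions that redirect `self` to a
# held object makes attribute access on the wrapper read and write the wrappee.

class _Inner:
    def __init__(self):
        self._x = 0

    @property
    def x(self):
        return self._x

    @x.setter
    def x(self, value):
        self._x = value


class _Outer:
    def __init__(self, inner):
        self.chart = inner  # mirrors SseqDisplay.chart holding the SseqChart


def _forward_property(name):
    prop = getattr(_Inner, name)
    fget = (lambda self: prop.fget(self.chart)) if prop.fget else None
    fset = (lambda self, value: prop.fset(self.chart, value)) if prop.fset else None
    setattr(_Outer, name, property(fget, fset))


_forward_property("x")

_outer = _Outer(_Inner())
_outer.x = 5                # routed through _Inner.x's setter
assert _outer.chart.x == 5  # the wrapped object saw the write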
from aiocloudflare.commons.auth import Auth


class Summary(Auth):
    _endpoint1 = "zones"
    _endpoint2 = "spectrum/analytics/events/summary"
    _endpoint3 = None
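# Presumably the Auth base class composes these endpoint fragments with a zone
# id into Cloudflare's zones/{zone_identifier}/spectrum/analytics/events/summary
# route; the composition logic lives in aiocloudflare.commons.auth, not here.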
<filename>2021/22b.py
#!/usr/bin/env python3

from __future__ import annotations

import sys
import re

from dataclasses import dataclass
from typing import List, NamedTuple, Tuple, Set


def extract(s):
    return [int(x) for x in re.findall(r'(-?\d+).?', s)]


class Pos(NamedTuple):
    x: int
    y: int
    z: int

    def dist(self, y: Pos) -> int:
        x = self
        return abs(x[0] - y[0]) + abs(x[1] - y[1]) + abs(x[2] - y[2])


@dataclass(frozen=True, order=True)
class Box:
    bot: Pos
    top: Pos

    def split_at(self, axis, val):
        atop = self.top[axis]
        abot = self.bot[axis]
        mid = val - 1
        assert mid >= abot
        assert atop != abot
        axname = "xyz"[axis]
        return [
            Box(self.bot, self.top._replace(**{axname: mid})),
            Box(self.bot._replace(**{axname: mid+1}), self.top),
        ]

    def overlap(self, other):
        return (
            self.top.x >= other.bot.x and other.top.x >= self.bot.x
            and self.top.y >= other.bot.y and other.top.y >= self.bot.y
            and self.top.z >= other.bot.z and other.top.z >= self.bot.z
        )

    def volume(self):
        assert self.top.x-self.bot.x >= 0
        assert self.top.y-self.bot.y >= 0
        assert self.top.z-self.bot.z >= 0
        return (self.top.x-self.bot.x+1)*(self.top.y-self.bot.y+1)*(self.top.z-self.bot.z+1)


def splitoff(n, m):
    if not n.overlap(m):
        return [n], None
    on = n
    fns = []
    for axis in [0,1,2]:
        if n.bot[axis] < m.bot[axis] <= n.top[axis]:
            l, n = n.split_at(axis, m.bot[axis])
            fns.append(l)
        if n.bot[axis] <= m.top[axis] < n.top[axis]:
            n, l = n.split_at(axis, m.top[axis]+1)
            fns.append(l)
    assert sum(x.volume() for x in fns) + n.volume() == on.volume()
    check(fns, {m})
    return fns, n


def check(lhs, rhs):
    # The early return leaves the pairwise overlap assertion below disabled;
    # remove it to re-enable the (slow) sanity check.
    return
    for x in lhs:
        for y in rhs:
            assert x == y or not x.overlap(y), (x,y)


def main(args):
    data = [s.strip() for s in sys.stdin if s.strip() and not s.startswith('#')]
    cmds = [(s.split(" ")[0], extract(s)) for s in data]
    cmds = [(s, ((x[0], x[2], x[4]), (x[1], x[3], x[5]))) for s, x in cmds]

    on = set()
    for i, (cmd, (lo, hi)) in enumerate(cmds):
        box = Box(Pos(*lo), Pos(*hi))
        print(i, cmd, box)
        new_on = set()
        for x in on:
            splits, _ = splitoff(x, box)
            new_on.update(splits)
        check(new_on, {box})
        if cmd == "on":
            new_on |= {box}
        on = new_on
        check(on, on)
        print(i, sum(x.volume() for x in on), len(on))

    # just for fun, also compute part 1
    part1 = { z for x in on if (z := splitoff(x, Box(Pos(-50,-50,-50), Pos(50,50,50)))[1]) }
    print(sum(x.volume() for x in part1))
    print(sum(x.volume() for x in on))


if __name__ == '__main__':
    main(sys.argv)
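# Worked example of splitoff's contract (hypothetical values, not from the
# original solution). Carving the overlap with m out of n yields axis-aligned
# fragments plus the shared region, and total volume is conserved:
#
#   n = Box(Pos(0, 0, 0), Pos(9, 9, 9))   # volume 10**3 = 1000
#   m = Box(Pos(5, 5, 5), Pos(9, 9, 9))   # covers n's upper corner, volume 125
#   fragments, shared = splitoff(n, m)
#   # fragments: (0..4, 0..9, 0..9), (5..9, 0..4, 0..9), (5..9, 5..9, 0..4)
#   assert shared == Box(Pos(5, 5, 5), Pos(9, 9, 9))
#   assert sum(b.volume() for b in fragments) == 1000 - 125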
# Read the problem below and then implement it in code. You do not need to submit your
# written decomposition of how you’ve worked it out but make sure to comment your code
# to explain what you’ve done.
#
# A computer generates a random number from 0 – 10. It then asks the user to make a
# guess. They have 5 attempts to get it right. If they get it correct, the program says
# they’ve won and ends. If they’re wrong, they’re asked to guess again and told how many
# attempts they have remaining.

from random import randint

# Inclusive
random_num = randint(0, 10)
turns = 5  # 5 turns

# Count down 4, 3, 2, 1, 0 so the player gets the full 5 attempts; after each
# wrong guess, `turn` is exactly the number of guesses still left.
for turn in range(turns - 1, -1, -1):
    guess = int(input("Make a guess: "))

    if (guess == random_num):
        print("You're correct!")
        break
    else:
        print(f"Incorrect guess. You have {turn} guesses remaining.")
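# A hedged variant sketch (not part of the original exercise): Python's
# for/else makes the "out of attempts" case explicit, since the else branch
# only runs when the loop finishes without hitting break.
#
#   for remaining in range(turns - 1, -1, -1):
#       if int(input("Make a guess: ")) == random_num:
#           print("You're correct!")
#           break
#       print(f"Incorrect guess. You have {remaining} guesses remaining.")
#   else:
#       print(f"Out of attempts! The number was {random_num}.")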
<gh_stars>10-100
from .util import KeyArgsConstructor


class Traversal(KeyArgsConstructor):
    """Traversal method.

    Used by the tree rendering functions like :class:`~asciitree.LeftAligned`.
    """
    def get_children(self, node):
        """Return a list of children of a node."""
        raise NotImplementedError

    def get_root(self, tree):
        """Return a node representing the tree root from the tree."""
        return tree

    def get_text(self, node):
        """Return the text associated with a node."""
        return str(node)


class DictTraversal(Traversal):
    """Traversal suitable for a dictionary.

    Keys are tree labels, all values must be dictionaries as well."""
    def get_children(self, node):
        return list(node[1].items())

    def get_root(self, tree):
        return list(tree.items())[0]

    def get_text(self, node):
        return node[0]


class AttributeTraversal(Traversal):
    """Attribute traversal.

    Uses an attribute of a node as its list of children.
    """
    attribute = 'children'  #: Attribute to use.

    def get_children(self, node):
        return getattr(node, self.attribute)
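# Minimal usage sketch (not part of the original module; assumes the package's
# LeftAligned renderer, which defaults to DictTraversal). Guarded so importing
# the module stays side-effect free:
if __name__ == '__main__':
    from asciitree import LeftAligned

    tree = {'root': {'left': {}, 'right': {'leaf': {}}}}
    # Renders the nested dict as an ASCII tree; pass
    # traverse=AttributeTraversal() instead for node objects that expose a
    # `children` attribute.
    print(LeftAligned()(tree))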
from django.urls import reverse_lazy
from oauth2_provider import views

from .. import mixins


class ApplicationList(mixins.TwoFactorMixin, mixins.ApplicationViewMixin, views.ApplicationList):
    template_name = 'oauth2/applications/list.html'


class ApplicationRegistration(mixins.ApplicationCreationMixin, mixins.TwoFactorMixin, views.ApplicationRegistration):
    template_name = 'oauth2/applications/register.html'

    def get_success_url(self):
        return reverse_lazy('application_detail', kwargs={'pk': self.object.pk})


class ApplicationDetail(mixins.ApplicationViewMixin, mixins.TwoFactorMixin, views.ApplicationDetail):
    template_name = 'oauth2/applications/detail.html'


class ApplicationDelete(mixins.ApplicationDeleteMixin, mixins.TwoFactorMixin, views.ApplicationDelete):
    template_name = 'oauth2/applications/delete.html'
    success_url = reverse_lazy('application_list')


class ApplicationUpdate(mixins.ApplicationChangeMixin, mixins.TwoFactorMixin, views.ApplicationUpdate):
    template_name = 'oauth2/applications/update.html'

    def get_success_url(self):
        return reverse_lazy('application_detail', kwargs={'pk': self.object.pk})
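# Hypothetical URL wiring for the views above (illustrative only: the route
# names 'application_list' and 'application_detail' come from the
# reverse_lazy() calls in this file; the module path, remaining names, and
# URL patterns are assumptions):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('applications/', views.ApplicationList.as_view(), name='application_list'),
#       path('applications/register/', views.ApplicationRegistration.as_view(), name='application_register'),
#       path('applications/<int:pk>/', views.ApplicationDetail.as_view(), name='application_detail'),
#       path('applications/<int:pk>/delete/', views.ApplicationDelete.as_view(), name='application_delete'),
#       path('applications/<int:pk>/update/', views.ApplicationUpdate.as_view(), name='application_update'),
#   ]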
# -*- coding: utf8 -*-
from hon.packets import ID


def setup(bot):
    bot.config.module_config('welcome_members', [1, 'Will welcome members in /c m if set to non-zero value'])
    bot.config.module_config('officers', [[], 'Officers alts'])
    bot.config.module_config('allowdnd', [[], 'Allowed to use DND command'])
    bot.dnd = []


def change_member(bot, origin, data):
    who, status, whodid = data[0], data[1], data[2]
    if status == 0:
        del bot.clan_roster[who]
    elif status == 1:
        if who in bot.clan_roster:
            bot.clan_roster[who]['rank'] = 'Member'
        else:
            bot.clan_roster[who] = {"rank": "Member"}
    elif status == 2:
        bot.clan_roster[who]['rank'] = 'Officer'
    elif status == 3:  # not sure about this one
        bot.clan_roster[who]['rank'] = 'Leader'
change_member.event = [ID.HON_SC_CLAN_MEMBER_CHANGE]


def add_member(bot, origin, data):
    id = data[0]
    bot.clan_roster[id] = {"rank": "Member"}
    if bot.config.welcome_members > 0 and id in bot.id2nick:
        nick = bot.id2nick[id]
        bot.write_packet(ID.HON_CS_CLAN_MESSAGE, 'Welcome, {0}!'.format(nick))
add_member.event = [ID.HON_SC_CLAN_MEMBER_ADDED]


def member_changestatus(bot, origin, data):
    id = data[0]
    if id in bot.clan_roster:
        bot.clan_status[id] = data[1]
        if data[1] in [ID.HON_STATUS_OFFLINE]:
            # A player going offline is dropped from the do-not-disturb list.
            for key, nick in enumerate(bot.dnd):
                if id in bot.id2nick and bot.id2nick[id] == nick:
                    del bot.dnd[key]
                    break
        if data[1] in [ID.HON_STATUS_ONLINE]:
            # Upgrade lookup currently disabled:
            #nick = bot.id2nick[id]
            #bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick)
            pass
    elif id in bot.id2nick and bot.id2nick[id] == bot.config.owner:
        bot.clan_status[id] = data[1]
member_changestatus.event = [ID.HON_SC_UPDATE_STATUS]


def member_initstatus(bot, origin, data):
    for u in data[1]:
        id = u[0]
        if id in bot.clan_roster:
            if u[1] in [ID.HON_STATUS_ONLINE, ID.HON_STATUS_INGAME]:
                nick = bot.id2nick[id]
                #bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick)
            bot.clan_status[id] = u[1]
        elif bot.id2nick[id] == bot.config.owner:
            bot.clan_status[id] = u[1]
member_initstatus.event = [ID.HON_SC_INITIAL_STATUS]
member_initstatus.thread = False


def invite(bot, input):
    """invites to clan, admins only"""
    if not input.admin:
        return False
    bot.write_packet(ID.HON_CS_CLAN_ADD_MEMBER, input.group(2))
    bot.reply("Invited {0}".format(input.group(2)))
invite.commands = ['invite']


def remove(bot, input):
    """remove from clan, admins only"""
    if not input.admin:
        return False
    nick = input.group(2).lower()
    if nick not in bot.nick2id:
        bot.reply("Sorry, I don't know " + nick)
    else:
        id = bot.nick2id[nick]
        bot.write_packet(ID.HON_CS_CLAN_REMOVE_MEMBER, id)
        query = {
            'f': 'set_rank',
            'target_id': id,
            'member_ck': bot.cookie,
            'rank': 'Remove',
            'clan_id': bot.clan_info['clan_id']
        }
        bot.masterserver_request(query)
        bot.reply(nick + " was removed from the clan")
remove.commands = ['remove']


status = {
    ID.HON_STATUS_OFFLINE: "Offline",
    ID.HON_STATUS_ONLINE: "Online",
    ID.HON_STATUS_INLOBBY: "In Lobby",
    ID.HON_STATUS_INGAME: "In Game"
}


def sublist(alist, value):
    # Despite the name: returns the keys of a mapping whose value equals `value`.
    return [dictio for dictio in alist if alist[dictio] == value]


def info(bot, input):
    """Get clan member info"""
    if not input.group(2):
        bot.reply("{0} - Members: {1}, Online: {2}, In-Game: {3}".format(
            bot.clan_info['name'],
            len(bot.clan_roster),
            len(sublist(bot.clan_status, ID.HON_STATUS_ONLINE)),
            len(sublist(bot.clan_status, ID.HON_STATUS_INGAME))
        ))
    else:
        nick = input.group(2).lower()
        if nick not in bot.nick2id:
            bot.reply("Unknown Player")
        else:
            id = bot.nick2id[nick]
            if id in bot.clan_roster:
                player = bot.clan_roster[id]
                rank = player['rank']
                query = {'nickname': nick}
                query['f'] = 'show_stats'
                query['table'] = 'player'
                data = bot.masterserver_request(query, cookie=True)
                bot.reply("{0} - Rank: {1}, Last Online: {2}, Status: {3}".format(
                    nick, rank, data['last_activity'], status[bot.clan_status[id]]))
            else:
                bot.reply("Not in clan")
info.commands = ['info']


def officers(bot, input):
    """Find available officers"""
    avail_officers = []
    for ply in bot.id2nick:
        if ply == bot.account_id:
            continue
        if bot.id2nick[ply] in bot.dnd:
            continue
        if ply in bot.clan_status and ply in bot.clan_roster:
            if bot.clan_status[ply] == ID.HON_STATUS_ONLINE and (bot.clan_roster[ply]['rank'] in ['Officer', 'Leader'] or bot.id2nick[ply] in bot.config.officers):
                avail_officers.append(bot.id2nick[ply])
        elif bot.id2nick[ply] in bot.config.officers:
            if ply in bot.user_status and bot.user_status[ply] == ID.HON_STATUS_ONLINE:
                avail_officers.append(bot.id2nick[ply])
    if len(avail_officers) > 0:
        outstr = ', '.join(avail_officers)
    else:
        outstr = 'None'
    bot.reply("Available officers: {0}".format(outstr))
officers.commands = ['officers']


def officer(bot, input):
    """Add Officer Alt"""
    if not input.admin:
        return officers(bot, input)
    if not input.group(2):
        return
    nick = input.group(2).lower()
    if nick not in bot.config.officers:
        bot.config.set_add('officers', nick)
        bot.reply("Added {0} to officer list".format(nick))
    else:
        bot.reply(nick + " is already an officer")
officer.commands = ['officer']


def unofficer(bot, input):
    """Remove Officer Alt"""
    if not input.admin:
        return False
    if not input.group(2):
        return
    nick = input.group(2).lower()
    if nick in bot.config.officers:
        bot.config.set_del('officers', nick)
        bot.reply("Removed {0} from officer list".format(nick))
    else:
        bot.reply(nick + " isn't an officer")
unofficer.commands = ['unofficer']


def announce(bot, input):
    if not input.admin:
        return False
    if not input.group(2):
        return
    bot.write_packet(ID.HON_CS_CLAN_MESSAGE, input.group(2))
announce.commands = ['announce']


def dnd(bot, input):
    """Users can set themselves to not appear in player listing commands"""
    if input.nick not in bot.nick2id:
        bot.reply("Error occurred")
        return
    id = bot.nick2id[input.nick]
    if id not in bot.clan_roster or (id in bot.clan_roster and bot.clan_roster[id]['rank'] not in ['Officer', 'Leader']):
        if input.nick not in bot.config.officers and input.nick not in bot.config.allowdnd:
            return
    for key, nick in enumerate(bot.dnd):
        if input.nick == nick:
            bot.reply("You are now available.")
            del bot.dnd[key]
            return
    bot.reply("You are now unavailable.")
    bot.dnd.append(input.nick)
dnd.commands = ['dnd']
= bot.nick2id[input.nick] if not id in bot.clan_roster or (id in bot.clan_roster", "else: bot.reply(\"Not in clan\") info.commands = ['info'] def officers(bot, input): \"\"\"Find available officers\"\"\"", "\"Offline\", ID.HON_STATUS_ONLINE: \"Online\", ID.HON_STATUS_INLOBBY: \"In Lobby\", ID.HON_STATUS_INGAME: \"In Game\" } def sublist(alist, value):", "bot.clan_roster[who]['rank'] = 'Leader' change_member.event = [ID.HON_SC_CLAN_MEMBER_CHANGE] def add_member(bot,origin,data): id = data[0] bot.clan_roster[id] =", "Lobby\", ID.HON_STATUS_INGAME: \"In Game\" } def sublist(alist, value): return [dictio for dictio in", "bot.config.module_config('officers', [[], 'Officers alts']) bot.config.module_config('allowdnd', [[], 'Allowed to use DND command']) bot.dnd =", "input) if not input.group(2): return nick = input.group(2).lower() if not nick in bot.config.officers:", "In-Game: {3}\" .format( bot.clan_info['name'], len(bot.clan_roster), len(sublist(bot.clan_status, ID.HON_STATUS_ONLINE)), len(sublist(bot.clan_status, ID.HON_STATUS_INGAME)) ) ) else: nick", "in ['Officer', 'Leader'] or bot.id2nick[ply] in bot.config.officers): avail_officers.append(bot.id2nick[ply]) elif bot.id2nick[ply] in bot.config.officers: if", "nick) bot.clan_status[id] = u[1] elif bot.id2nick[id] == bot.config.owner: bot.clan_status[id] = u[1] member_initstatus.event =", "= { ID.HON_STATUS_OFFLINE: \"Offline\", ID.HON_STATUS_ONLINE: \"Online\", ID.HON_STATUS_INLOBBY: \"In Lobby\", ID.HON_STATUS_INGAME: \"In Game\" }", "nick = bot.id2nick[id] bot.write_packet(ID.HON_CS_CLAN_MESSAGE,'Welcome, {0}!'.format(nick)) add_member.event = [ID.HON_SC_CLAN_MEMBER_ADDED] def member_changestatus(bot,origin,data): id = data[0]", "sure about this one bot.clan_roster[who]['rank'] = 'Leader' change_member.event = [ID.HON_SC_CLAN_MEMBER_CHANGE] def add_member(bot,origin,data): id", "officer list\".format(nick)) else: bot.reply(nick + \" isn't an officer\") unofficer.commands = ['unofficer'] def", "query['f'] = 'show_stats' query['table'] = 'player' data = bot.masterserver_request(query,cookie=True) bot.reply(\"{0} - Rank: {1},", "ID.HON_STATUS_INLOBBY: \"In Lobby\", ID.HON_STATUS_INGAME: \"In Game\" } def sublist(alist, value): return [dictio for", "elif bot.id2nick[id] == bot.config.owner: bot.clan_status[id] = u[1] member_initstatus.event = [ID.HON_SC_INITIAL_STATUS] member_initstatus.thread = False", "def officers(bot, input): \"\"\"Find available officers\"\"\" avail_officers = [] for ply in bot.id2nick:", "'show_stats' query['table'] = 'player' data = bot.masterserver_request(query,cookie=True) bot.reply(\"{0} - Rank: {1}, Last Online:", "in alist if alist[dictio] == value] def info(bot,input): \"\"\"Get clan member info\"\"\" if", "bot.config.module_config('allowdnd', [[], 'Allowed to use DND command']) bot.dnd = [] def change_member(bot,origin,data): who,status,whodid", "set to non-zero value']) bot.config.module_config('officers', [[], 'Officers alts']) bot.config.module_config('allowdnd', [[], 'Allowed to use", "= input.group(2).lower() if not nick in bot.config.officers: bot.config.set_add('officers', nick) bot.reply(\"Added {0} to officer", "bot.config.set_add('officers', nick) bot.reply(\"Added {0} to officer list\".format(nick)) else: bot.reply(nick + \" is already", "Officer Alt\"\"\" if not input.admin: return officers(bot, input) if not input.group(2): return nick", "bot.clan_roster or (id in bot.clan_roster and not bot.clan_roster[id]['rank'] in ['Officer', 'Leader']): if not", "+ \" isn't an officer\") unofficer.commands = 
['unofficer'] def announce(bot, input): if not", "= data[1] if data[0] in [ID.HON_STATUS_OFFLINE]: for key, nick in enumerate(bot.dnd): if id", "add_member.event = [ID.HON_SC_CLAN_MEMBER_ADDED] def member_changestatus(bot,origin,data): id = data[0] if id in bot.clan_roster: bot.clan_status[id]", "bot.reply(\"{0} - Rank: {1}, Last Online: {2}, Status: {3}\".format(nick, rank, data['last_activity'], status[bot.clan_status[id]])) else:", "avail_officers.append(bot.id2nick[ply]) if len(avail_officers) > 0: outstr = ', '.join(avail_officers) else: outstr = 'None'", "and not input.nick in bot.config.allowdnd: return for key, nick in enumerate(bot.dnd): if input.nick", "list\".format(nick)) else: bot.reply(nick + \" isn't an officer\") unofficer.commands = ['unofficer'] def announce(bot,", "'Leader' change_member.event = [ID.HON_SC_CLAN_MEMBER_CHANGE] def add_member(bot,origin,data): id = data[0] bot.clan_roster[id] = {\"rank\":\"Member\"} if", "status == 2: bot.clan_roster[who]['rank'] = 'Officer' elif status == 3:#not sure about this", "input.admin: return False if not input.group(2): return nick = input.group(2).lower() if nick in", "\"Online\", ID.HON_STATUS_INLOBBY: \"In Lobby\", ID.HON_STATUS_INGAME: \"In Game\" } def sublist(alist, value): return [dictio", "{0}\".format( outstr ) ) officers.commands = ['officers'] def officer(bot, input): \"\"\"Add Officer Alt\"\"\"", "#nick = bot.id2nick[id] #bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick) elif id in bot.id2nick and bot.id2nick[id]", "bot.id2nick[ply] in bot.config.officers): avail_officers.append(bot.id2nick[ply]) elif bot.id2nick[ply] in bot.config.officers: if ply in bot.user_status and", "in /c m if set to non-zero value']) bot.config.module_config('officers', [[], 'Officers alts']) bot.config.module_config('allowdnd',", "def remove(bot,input): \"\"\"remove from clan, admins only\"\"\" if not input.admin: return False nick", "to use DND command']) bot.dnd = [] def change_member(bot,origin,data): who,status,whodid = data[0],data[1],data[2] if", "to non-zero value']) bot.config.module_config('officers', [[], 'Officers alts']) bot.config.module_config('allowdnd', [[], 'Allowed to use DND", "value] def info(bot,input): \"\"\"Get clan member info\"\"\" if not input.group(2): bot.reply( \"{0} -", "Rank: {1}, Last Online: {2}, Status: {3}\".format(nick, rank, data['last_activity'], status[bot.clan_status[id]])) else: bot.reply(\"Not in", "\" isn't an officer\") unofficer.commands = ['unofficer'] def announce(bot, input): if not input.admin:", "ply in bot.clan_status and ply in bot.clan_roster: if bot.clan_status[ply] is ID.HON_STATUS_ONLINE and (bot.clan_roster[ply]['rank']", "'f' : 'set_rank', 'target_id' : id, 'member_ck': bot.cookie, 'rank' : 'Remove', 'clan_id' :", "in bot.config.officers: if ply in bot.user_status and bot.user_status[ply] is ID.HON_STATUS_ONLINE: avail_officers.append(bot.id2nick[ply]) if len(avail_officers)", "alts']) bot.config.module_config('allowdnd', [[], 'Allowed to use DND command']) bot.dnd = [] def change_member(bot,origin,data):", "' + nick) else: id = bot.nick2id[nick] bot.write_packet(ID.HON_CS_CLAN_REMOVE_MEMBER,id) query = { 'f' :", "['unofficer'] def announce(bot, input): if not input.admin: return False if not input.group(2): return", "== bot.config.owner: bot.clan_status[id] = u[1] member_initstatus.event = [ID.HON_SC_INITIAL_STATUS] member_initstatus.thread = False def invite(bot,input):", "= [ID.HON_SC_UPDATE_STATUS] def member_initstatus(bot,origin,data): for u in data[1]: id = u[0] if 
id", "who,status,whodid = data[0],data[1],data[2] if status == 0: del(bot.clan_roster[who]) elif status == 1: if", "if not input.admin: return False nick = input.group(2).lower() if nick not in bot.nick2id:", "Alt\"\"\" if not input.admin: return False if not input.group(2): return nick = input.group(2).lower()", "= 'player' data = bot.masterserver_request(query,cookie=True) bot.reply(\"{0} - Rank: {1}, Last Online: {2}, Status:", "themselves to not appear in player listing commands\"\"\" if input.nick not in bot.nick2id:", "bot.reply(\"Not in clan\") info.commands = ['info'] def officers(bot, input): \"\"\"Find available officers\"\"\" avail_officers", "member info\"\"\" if not input.group(2): bot.reply( \"{0} - Members: {1}, Online: {2}, In-Game:", "in bot.id2nick and bot.id2nick[id] == bot.config.owner: bot.clan_status[id] = data[1] member_changestatus.event = [ID.HON_SC_UPDATE_STATUS] def", "bot.config.officers: bot.config.set_del('officers', nick) bot.reply(\"Removed {0} from officer list\".format(nick)) else: bot.reply(nick + \" isn't", "listing commands\"\"\" if input.nick not in bot.nick2id: bot.reply(\"Error occurred\") return id = bot.nick2id[input.nick]", "input.nick == nick: bot.reply(\"You are now available.\") del(bot.dnd[key]) return bot.reply(\"You are now unavailable.\")", "nick = input.group(2).lower() if nick not in bot.nick2id: bot.reply(\"Unknown Player\") else: id =", "if nick in bot.config.officers: bot.config.set_del('officers', nick) bot.reply(\"Removed {0} from officer list\".format(nick)) else: bot.reply(nick", "not id in bot.clan_roster or (id in bot.clan_roster and not bot.clan_roster[id]['rank'] in ['Officer',", "bot.id2nick and bot.id2nick[id] == bot.config.owner: bot.clan_status[id] = data[1] member_changestatus.event = [ID.HON_SC_UPDATE_STATUS] def member_initstatus(bot,origin,data):", ") officers.commands = ['officers'] def officer(bot, input): \"\"\"Add Officer Alt\"\"\" if not input.admin:", "2: bot.clan_roster[who]['rank'] = 'Officer' elif status == 3:#not sure about this one bot.clan_roster[who]['rank']", "bot.nick2id: bot.reply(\"Error occurred\") return id = bot.nick2id[input.nick] if not id in bot.clan_roster or", "nick not in bot.nick2id: bot.reply('Sorry, I don''t know ' + nick) else: id", "return nick = input.group(2).lower() if nick in bot.config.officers: bot.config.set_del('officers', nick) bot.reply(\"Removed {0} from", "bot.reply(nick + \" was removed from the clan\") remove.commands = ['remove'] status =", "0 and id in bot.id2nick: nick = bot.id2nick[id] bot.write_packet(ID.HON_CS_CLAN_MESSAGE,'Welcome, {0}!'.format(nick)) add_member.event = [ID.HON_SC_CLAN_MEMBER_ADDED]", "status = { ID.HON_STATUS_OFFLINE: \"Offline\", ID.HON_STATUS_ONLINE: \"Online\", ID.HON_STATUS_INLOBBY: \"In Lobby\", ID.HON_STATUS_INGAME: \"In Game\"", "input.group(2).lower() if nick not in bot.nick2id: bot.reply(\"Unknown Player\") else: id = bot.nick2id[nick] if", "input.group(2).lower() if nick not in bot.nick2id: bot.reply('Sorry, I don''t know ' + nick)", "= bot.id2nick[id] bot.write_packet(ID.HON_CS_CLAN_MESSAGE,'Welcome, {0}!'.format(nick)) add_member.event = [ID.HON_SC_CLAN_MEMBER_ADDED] def member_changestatus(bot,origin,data): id = data[0] if", "= bot.id2nick[id] #bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick) bot.clan_status[id] = u[1] elif bot.id2nick[id] == bot.config.owner:", "else: bot.clan_roster[who] = {\"rank\":\"Member\"} elif status == 2: bot.clan_roster[who]['rank'] = 'Officer' elif status", "ply in bot.id2nick: if ply == 
bot.account_id: continue if bot.id2nick[ply] in bot.dnd: continue", "= {\"rank\":\"Member\"} elif status == 2: bot.clan_roster[who]['rank'] = 'Officer' elif status == 3:#not", "enumerate(bot.dnd): if input.nick == nick: bot.reply(\"You are now available.\") del(bot.dnd[key]) return bot.reply(\"You are", "bot.reply(\"Removed {0} from officer list\".format(nick)) else: bot.reply(nick + \" isn't an officer\") unofficer.commands", "'.join(avail_officers) else: outstr = 'None' bot.reply( \"Available officers: {0}\".format( outstr ) ) officers.commands", "= data[0],data[1],data[2] if status == 0: del(bot.clan_roster[who]) elif status == 1: if who", "'member_ck': bot.cookie, 'rank' : 'Remove', 'clan_id' : bot.clan_info['clan_id'] } bot.masterserver_request(query) bot.reply(nick + \"", "value']) bot.config.module_config('officers', [[], 'Officers alts']) bot.config.module_config('allowdnd', [[], 'Allowed to use DND command']) bot.dnd", "} bot.masterserver_request(query) bot.reply(nick + \" was removed from the clan\") remove.commands = ['remove']", "input.group(2): bot.reply( \"{0} - Members: {1}, Online: {2}, In-Game: {3}\" .format( bot.clan_info['name'], len(bot.clan_roster),", "= { 'f' : 'set_rank', 'target_id' : id, 'member_ck': bot.cookie, 'rank' : 'Remove',", "not appear in player listing commands\"\"\" if input.nick not in bot.nick2id: bot.reply(\"Error occurred\")", "= {\"rank\":\"Member\"} if bot.config.welcome_members > 0 and id in bot.id2nick: nick = bot.id2nick[id]", "del(bot.clan_roster[who]) elif status == 1: if who in bot.clan_roster: bot.clan_roster[who]['rank'] = 'Member' else:", "if not input.admin: return False bot.write_packet(ID.HON_CS_CLAN_ADD_MEMBER,input.group(2)) bot.reply(\"Invited {0}\".format(input.group(2))) invite.commands = ['invite'] def remove(bot,input):", "clan member info\"\"\" if not input.group(2): bot.reply( \"{0} - Members: {1}, Online: {2},", ": bot.clan_info['clan_id'] } bot.masterserver_request(query) bot.reply(nick + \" was removed from the clan\") remove.commands", "officers(bot, input): \"\"\"Find available officers\"\"\" avail_officers = [] for ply in bot.id2nick: if", "bot.user_status[ply] is ID.HON_STATUS_ONLINE: avail_officers.append(bot.id2nick[ply]) if len(avail_officers) > 0: outstr = ', '.join(avail_officers) else:", "['Officer', 'Leader']): if not input.nick in bot.config.officers and not input.nick in bot.config.allowdnd: return", "bot.nick2id: bot.reply('Sorry, I don''t know ' + nick) else: id = bot.nick2id[nick] bot.write_packet(ID.HON_CS_CLAN_REMOVE_MEMBER,id)", ": 'set_rank', 'target_id' : id, 'member_ck': bot.cookie, 'rank' : 'Remove', 'clan_id' : bot.clan_info['clan_id']", "ID def setup(bot): bot.config.module_config('welcome_members',[1,'Will welcome members in /c m if set to non-zero", "Online: {2}, Status: {3}\".format(nick, rank, data['last_activity'], status[bot.clan_status[id]])) else: bot.reply(\"Not in clan\") info.commands =", "avail_officers.append(bot.id2nick[ply]) elif bot.id2nick[ply] in bot.config.officers: if ply in bot.user_status and bot.user_status[ply] is ID.HON_STATUS_ONLINE:", "[ID.HON_SC_INITIAL_STATUS] member_initstatus.thread = False def invite(bot,input): \"\"\"invites to clan, admins only\"\"\" if not", "= [] for ply in bot.id2nick: if ply == bot.account_id: continue if bot.id2nick[ply]", "input): if not input.admin: return False if not input.group(2): return bot.write_packet(ID.HON_CS_CLAN_MESSAGE, input.group(2)) announce.commands", "input.admin: return False if not input.group(2): return 
bot.write_packet(ID.HON_CS_CLAN_MESSAGE, input.group(2)) announce.commands = ['announce'] def", "alist if alist[dictio] == value] def info(bot,input): \"\"\"Get clan member info\"\"\" if not", "if id in bot.clan_roster: player = bot.clan_roster[id] rank = player['rank'] query = {'nickname'", "and bot.user_status[ply] is ID.HON_STATUS_ONLINE: avail_officers.append(bot.id2nick[ply]) if len(avail_officers) > 0: outstr = ', '.join(avail_officers)", "change_member(bot,origin,data): who,status,whodid = data[0],data[1],data[2] if status == 0: del(bot.clan_roster[who]) elif status == 1:", "bot.config.set_del('officers', nick) bot.reply(\"Removed {0} from officer list\".format(nick)) else: bot.reply(nick + \" isn't an", "if id in bot.clan_roster: if u[1] in [ID.HON_STATUS_ONLINE, ID.HON_STATUS_INGAME]: nick = bot.id2nick[id] #bot.clan_roster[id]['upgrades']", "ID.HON_STATUS_OFFLINE: \"Offline\", ID.HON_STATUS_ONLINE: \"Online\", ID.HON_STATUS_INLOBBY: \"In Lobby\", ID.HON_STATUS_INGAME: \"In Game\" } def sublist(alist,", "admins only\"\"\" if not input.admin: return False bot.write_packet(ID.HON_CS_CLAN_ADD_MEMBER,input.group(2)) bot.reply(\"Invited {0}\".format(input.group(2))) invite.commands = ['invite']", "rank, data['last_activity'], status[bot.clan_status[id]])) else: bot.reply(\"Not in clan\") info.commands = ['info'] def officers(bot, input):", "= False def invite(bot,input): \"\"\"invites to clan, admins only\"\"\" if not input.admin: return", "id = data[0] bot.clan_roster[id] = {\"rank\":\"Member\"} if bot.config.welcome_members > 0 and id in", "dnd(bot, input): \"\"\"Users can set themselves to not appear in player listing commands\"\"\"", "nick) else: id = bot.nick2id[nick] bot.write_packet(ID.HON_CS_CLAN_REMOVE_MEMBER,id) query = { 'f' : 'set_rank', 'target_id'", "utf8 -*- from hon.packets import ID def setup(bot): bot.config.module_config('welcome_members',[1,'Will welcome members in /c", "setup(bot): bot.config.module_config('welcome_members',[1,'Will welcome members in /c m if set to non-zero value']) bot.config.module_config('officers',", "if bot.config.welcome_members > 0 and id in bot.id2nick: nick = bot.id2nick[id] bot.write_packet(ID.HON_CS_CLAN_MESSAGE,'Welcome, {0}!'.format(nick))", "def member_changestatus(bot,origin,data): id = data[0] if id in bot.clan_roster: bot.clan_status[id] = data[1] if", "input.nick in bot.config.allowdnd: return for key, nick in enumerate(bot.dnd): if input.nick == nick:", "['invite'] def remove(bot,input): \"\"\"remove from clan, admins only\"\"\" if not input.admin: return False", "this one bot.clan_roster[who]['rank'] = 'Leader' change_member.event = [ID.HON_SC_CLAN_MEMBER_CHANGE] def add_member(bot,origin,data): id = data[0]", "not input.admin: return False bot.write_packet(ID.HON_CS_CLAN_ADD_MEMBER,input.group(2)) bot.reply(\"Invited {0}\".format(input.group(2))) invite.commands = ['invite'] def remove(bot,input): \"\"\"remove", "query['table'] = 'player' data = bot.masterserver_request(query,cookie=True) bot.reply(\"{0} - Rank: {1}, Last Online: {2},", "= ['invite'] def remove(bot,input): \"\"\"remove from clan, admins only\"\"\" if not input.admin: return", "= 'Officer' elif status == 3:#not sure about this one bot.clan_roster[who]['rank'] = 'Leader'", "bot.clan_roster[who] = {\"rank\":\"Member\"} elif status == 2: bot.clan_roster[who]['rank'] = 'Officer' elif status ==", "and (bot.clan_roster[ply]['rank'] in ['Officer', 'Leader'] or bot.id2nick[ply] in bot.config.officers): avail_officers.append(bot.id2nick[ply]) elif bot.id2nick[ply] in", 
"bot.clan_status[ply] is ID.HON_STATUS_ONLINE and (bot.clan_roster[ply]['rank'] in ['Officer', 'Leader'] or bot.id2nick[ply] in bot.config.officers): avail_officers.append(bot.id2nick[ply])", "removed from the clan\") remove.commands = ['remove'] status = { ID.HON_STATUS_OFFLINE: \"Offline\", ID.HON_STATUS_ONLINE:", "\"In Game\" } def sublist(alist, value): return [dictio for dictio in alist if", "player = bot.clan_roster[id] rank = player['rank'] query = {'nickname' : nick} query['f'] =", "in bot.user_status and bot.user_status[ply] is ID.HON_STATUS_ONLINE: avail_officers.append(bot.id2nick[ply]) if len(avail_officers) > 0: outstr =", "in bot.id2nick: nick = bot.id2nick[id] bot.write_packet(ID.HON_CS_CLAN_MESSAGE,'Welcome, {0}!'.format(nick)) add_member.event = [ID.HON_SC_CLAN_MEMBER_ADDED] def member_changestatus(bot,origin,data): id", "u[0] if id in bot.clan_roster: if u[1] in [ID.HON_STATUS_ONLINE, ID.HON_STATUS_INGAME]: nick = bot.id2nick[id]", "if not input.admin: return False if not input.group(2): return nick = input.group(2).lower() if", "ID.HON_STATUS_ONLINE: \"Online\", ID.HON_STATUS_INLOBBY: \"In Lobby\", ID.HON_STATUS_INGAME: \"In Game\" } def sublist(alist, value): return", "continue if ply in bot.clan_status and ply in bot.clan_roster: if bot.clan_status[ply] is ID.HON_STATUS_ONLINE", "value): return [dictio for dictio in alist if alist[dictio] == value] def info(bot,input):", "return False bot.write_packet(ID.HON_CS_CLAN_ADD_MEMBER,input.group(2)) bot.reply(\"Invited {0}\".format(input.group(2))) invite.commands = ['invite'] def remove(bot,input): \"\"\"remove from clan,", "def setup(bot): bot.config.module_config('welcome_members',[1,'Will welcome members in /c m if set to non-zero value'])", ": id, 'member_ck': bot.cookie, 'rank' : 'Remove', 'clan_id' : bot.clan_info['clan_id'] } bot.masterserver_request(query) bot.reply(nick", "def announce(bot, input): if not input.admin: return False if not input.group(2): return bot.write_packet(ID.HON_CS_CLAN_MESSAGE,", "{3}\".format(nick, rank, data['last_activity'], status[bot.clan_status[id]])) else: bot.reply(\"Not in clan\") info.commands = ['info'] def officers(bot,", "len(avail_officers) > 0: outstr = ', '.join(avail_officers) else: outstr = 'None' bot.reply( \"Available", "\"Available officers: {0}\".format( outstr ) ) officers.commands = ['officers'] def officer(bot, input): \"\"\"Add", "== 1: if who in bot.clan_roster: bot.clan_roster[who]['rank'] = 'Member' else: bot.clan_roster[who] = {\"rank\":\"Member\"}", "['officers'] def officer(bot, input): \"\"\"Add Officer Alt\"\"\" if not input.admin: return officers(bot, input)", "to not appear in player listing commands\"\"\" if input.nick not in bot.nick2id: bot.reply(\"Error", "already an officer\") officer.commands = ['officer'] def unofficer(bot, input): \"\"\"Remove Officer Alt\"\"\" if", "in ['Officer', 'Leader']): if not input.nick in bot.config.officers and not input.nick in bot.config.allowdnd:", "elif status == 3:#not sure about this one bot.clan_roster[who]['rank'] = 'Leader' change_member.event =", "bot.config.module_config('welcome_members',[1,'Will welcome members in /c m if set to non-zero value']) bot.config.module_config('officers', [[],", "u[1] in [ID.HON_STATUS_ONLINE, ID.HON_STATUS_INGAME]: nick = bot.id2nick[id] #bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick) bot.clan_status[id] =", "bot.masterserver_request(query,cookie=True) bot.reply(\"{0} - Rank: {1}, Last Online: {2}, Status: {3}\".format(nick, rank, data['last_activity'], 
status[bot.clan_status[id]]))", "if id in bot.id2nick and bot.id2nick[id] == nick: del(bot.dnd[key]) break if data[1] in", "officer.commands = ['officer'] def unofficer(bot, input): \"\"\"Remove Officer Alt\"\"\" if not input.admin: return", "[ID.HON_STATUS_ONLINE]: \"\"\" \"\"\" #nick = bot.id2nick[id] #bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick) elif id in", "bot.reply(\"Unknown Player\") else: id = bot.nick2id[nick] if id in bot.clan_roster: player = bot.clan_roster[id]", "in bot.id2nick and bot.id2nick[id] == nick: del(bot.dnd[key]) break if data[1] in [ID.HON_STATUS_ONLINE]: \"\"\"", "== nick: del(bot.dnd[key]) break if data[1] in [ID.HON_STATUS_ONLINE]: \"\"\" \"\"\" #nick = bot.id2nick[id]", "{0}!'.format(nick)) add_member.event = [ID.HON_SC_CLAN_MEMBER_ADDED] def member_changestatus(bot,origin,data): id = data[0] if id in bot.clan_roster:", ".format( bot.clan_info['name'], len(bot.clan_roster), len(sublist(bot.clan_status, ID.HON_STATUS_ONLINE)), len(sublist(bot.clan_status, ID.HON_STATUS_INGAME)) ) ) else: nick = input.group(2).lower()", "def sublist(alist, value): return [dictio for dictio in alist if alist[dictio] == value]", "if u[1] in [ID.HON_STATUS_ONLINE, ID.HON_STATUS_INGAME]: nick = bot.id2nick[id] #bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick) bot.clan_status[id]", "'clan_id' : bot.clan_info['clan_id'] } bot.masterserver_request(query) bot.reply(nick + \" was removed from the clan\")", "data['last_activity'], status[bot.clan_status[id]])) else: bot.reply(\"Not in clan\") info.commands = ['info'] def officers(bot, input): \"\"\"Find", "status[bot.clan_status[id]])) else: bot.reply(\"Not in clan\") info.commands = ['info'] def officers(bot, input): \"\"\"Find available", "for u in data[1]: id = u[0] if id in bot.clan_roster: if u[1]", "= ['unofficer'] def announce(bot, input): if not input.admin: return False if not input.group(2):", "[ID.HON_STATUS_ONLINE, ID.HON_STATUS_INGAME]: nick = bot.id2nick[id] #bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick) bot.clan_status[id] = u[1] elif", "bot.id2nick[id] == nick: del(bot.dnd[key]) break if data[1] in [ID.HON_STATUS_ONLINE]: \"\"\" \"\"\" #nick =", "= u[1] member_initstatus.event = [ID.HON_SC_INITIAL_STATUS] member_initstatus.thread = False def invite(bot,input): \"\"\"invites to clan,", "bot.reply( \"Available officers: {0}\".format( outstr ) ) officers.commands = ['officers'] def officer(bot, input):", "\"\"\"Add Officer Alt\"\"\" if not input.admin: return officers(bot, input) if not input.group(2): return", "= player['rank'] query = {'nickname' : nick} query['f'] = 'show_stats' query['table'] = 'player'", "to clan, admins only\"\"\" if not input.admin: return False bot.write_packet(ID.HON_CS_CLAN_ADD_MEMBER,input.group(2)) bot.reply(\"Invited {0}\".format(input.group(2))) invite.commands", "bot.id2nick[ply] in bot.dnd: continue if ply in bot.clan_status and ply in bot.clan_roster: if", "3:#not sure about this one bot.clan_roster[who]['rank'] = 'Leader' change_member.event = [ID.HON_SC_CLAN_MEMBER_CHANGE] def add_member(bot,origin,data):", "bot.user_status and bot.user_status[ply] is ID.HON_STATUS_ONLINE: avail_officers.append(bot.id2nick[ply]) if len(avail_officers) > 0: outstr = ',", "status == 3:#not sure about this one bot.clan_roster[who]['rank'] = 'Leader' change_member.event = [ID.HON_SC_CLAN_MEMBER_CHANGE]", "= ['remove'] status = { ID.HON_STATUS_OFFLINE: \"Offline\", ID.HON_STATUS_ONLINE: \"Online\", ID.HON_STATUS_INLOBBY: \"In Lobby\", ID.HON_STATUS_INGAME:", "{2}, Status: 
{3}\".format(nick, rank, data['last_activity'], status[bot.clan_status[id]])) else: bot.reply(\"Not in clan\") info.commands = ['info']", "'Member' else: bot.clan_roster[who] = {\"rank\":\"Member\"} elif status == 2: bot.clan_roster[who]['rank'] = 'Officer' elif", "elif id in bot.id2nick and bot.id2nick[id] == bot.config.owner: bot.clan_status[id] = data[1] member_changestatus.event =", "bot.clan_roster: bot.clan_roster[who]['rank'] = 'Member' else: bot.clan_roster[who] = {\"rank\":\"Member\"} elif status == 2: bot.clan_roster[who]['rank']", "one bot.clan_roster[who]['rank'] = 'Leader' change_member.event = [ID.HON_SC_CLAN_MEMBER_CHANGE] def add_member(bot,origin,data): id = data[0] bot.clan_roster[id]", "ID.HON_STATUS_INGAME]: nick = bot.id2nick[id] #bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick) bot.clan_status[id] = u[1] elif bot.id2nick[id]", "Last Online: {2}, Status: {3}\".format(nick, rank, data['last_activity'], status[bot.clan_status[id]])) else: bot.reply(\"Not in clan\") info.commands", "= ['officer'] def unofficer(bot, input): \"\"\"Remove Officer Alt\"\"\" if not input.admin: return False", "id in bot.clan_roster or (id in bot.clan_roster and not bot.clan_roster[id]['rank'] in ['Officer', 'Leader']):", "return False nick = input.group(2).lower() if nick not in bot.nick2id: bot.reply('Sorry, I don''t", "in bot.clan_roster: player = bot.clan_roster[id] rank = player['rank'] query = {'nickname' : nick}", "else: nick = input.group(2).lower() if nick not in bot.nick2id: bot.reply(\"Unknown Player\") else: id", "members in /c m if set to non-zero value']) bot.config.module_config('officers', [[], 'Officers alts'])", "officers: {0}\".format( outstr ) ) officers.commands = ['officers'] def officer(bot, input): \"\"\"Add Officer", "== nick: bot.reply(\"You are now available.\") del(bot.dnd[key]) return bot.reply(\"You are now unavailable.\") bot.dnd.append(input.nick)", "bot.config.welcome_members > 0 and id in bot.id2nick: nick = bot.id2nick[id] bot.write_packet(ID.HON_CS_CLAN_MESSAGE,'Welcome, {0}!'.format(nick)) add_member.event", "= data[0] if id in bot.clan_roster: bot.clan_status[id] = data[1] if data[0] in [ID.HON_STATUS_OFFLINE]:", "ID.HON_STATUS_INGAME)) ) ) else: nick = input.group(2).lower() if nick not in bot.nick2id: bot.reply(\"Unknown", "bot.config.owner: bot.clan_status[id] = u[1] member_initstatus.event = [ID.HON_SC_INITIAL_STATUS] member_initstatus.thread = False def invite(bot,input): \"\"\"invites", "data[0],data[1],data[2] if status == 0: del(bot.clan_roster[who]) elif status == 1: if who in", "\"\"\"Find available officers\"\"\" avail_officers = [] for ply in bot.id2nick: if ply ==", "'Leader'] or bot.id2nick[ply] in bot.config.officers): avail_officers.append(bot.id2nick[ply]) elif bot.id2nick[ply] in bot.config.officers: if ply in", "not input.nick in bot.config.officers and not input.nick in bot.config.allowdnd: return for key, nick", "bot.id2nick: if ply == bot.account_id: continue if bot.id2nick[ply] in bot.dnd: continue if ply", "if set to non-zero value']) bot.config.module_config('officers', [[], 'Officers alts']) bot.config.module_config('allowdnd', [[], 'Allowed to", "= bot.nick2id[nick] if id in bot.clan_roster: player = bot.clan_roster[id] rank = player['rank'] query", "\"\"\" #nick = bot.id2nick[id] #bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick) elif id in bot.id2nick and", "user_upgrades(bot, nick) bot.clan_status[id] = u[1] elif bot.id2nick[id] == bot.config.owner: bot.clan_status[id] = u[1] member_initstatus.event", "def 
officer(bot, input): \"\"\"Add Officer Alt\"\"\" if not input.admin: return officers(bot, input) if", "only\"\"\" if not input.admin: return False bot.write_packet(ID.HON_CS_CLAN_ADD_MEMBER,input.group(2)) bot.reply(\"Invited {0}\".format(input.group(2))) invite.commands = ['invite'] def", "0: outstr = ', '.join(avail_officers) else: outstr = 'None' bot.reply( \"Available officers: {0}\".format(", "bot.config.owner: bot.clan_status[id] = data[1] member_changestatus.event = [ID.HON_SC_UPDATE_STATUS] def member_initstatus(bot,origin,data): for u in data[1]:", "member_initstatus.thread = False def invite(bot,input): \"\"\"invites to clan, admins only\"\"\" if not input.admin:", "outstr = ', '.join(avail_officers) else: outstr = 'None' bot.reply( \"Available officers: {0}\".format( outstr", "if ply == bot.account_id: continue if bot.id2nick[ply] in bot.dnd: continue if ply in", "in bot.clan_roster and not bot.clan_roster[id]['rank'] in ['Officer', 'Leader']): if not input.nick in bot.config.officers", "ply in bot.user_status and bot.user_status[ply] is ID.HON_STATUS_ONLINE: avail_officers.append(bot.id2nick[ply]) if len(avail_officers) > 0: outstr", "Player\") else: id = bot.nick2id[nick] if id in bot.clan_roster: player = bot.clan_roster[id] rank", "bot.nick2id[input.nick] if not id in bot.clan_roster or (id in bot.clan_roster and not bot.clan_roster[id]['rank']", "info(bot,input): \"\"\"Get clan member info\"\"\" if not input.group(2): bot.reply( \"{0} - Members: {1},", "False bot.write_packet(ID.HON_CS_CLAN_ADD_MEMBER,input.group(2)) bot.reply(\"Invited {0}\".format(input.group(2))) invite.commands = ['invite'] def remove(bot,input): \"\"\"remove from clan, admins", "bot.clan_roster[id] rank = player['rank'] query = {'nickname' : nick} query['f'] = 'show_stats' query['table']", "= bot.clan_roster[id] rank = player['rank'] query = {'nickname' : nick} query['f'] = 'show_stats'", "'Officer' elif status == 3:#not sure about this one bot.clan_roster[who]['rank'] = 'Leader' change_member.event", "in [ID.HON_STATUS_ONLINE]: \"\"\" \"\"\" #nick = bot.id2nick[id] #bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick) elif id", "in data[1]: id = u[0] if id in bot.clan_roster: if u[1] in [ID.HON_STATUS_ONLINE,", "avail_officers = [] for ply in bot.id2nick: if ply == bot.account_id: continue if", "to officer list\".format(nick)) else: bot.reply(nick + \" is already an officer\") officer.commands =", "if nick not in bot.nick2id: bot.reply('Sorry, I don''t know ' + nick) else:", "don''t know ' + nick) else: id = bot.nick2id[nick] bot.write_packet(ID.HON_CS_CLAN_REMOVE_MEMBER,id) query = {", "if ply in bot.clan_status and ply in bot.clan_roster: if bot.clan_status[ply] is ID.HON_STATUS_ONLINE and", "if data[0] in [ID.HON_STATUS_OFFLINE]: for key, nick in enumerate(bot.dnd): if id in bot.id2nick", "bot.id2nick[id] #bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick) bot.clan_status[id] = u[1] elif bot.id2nick[id] == bot.config.owner: bot.clan_status[id]", "= input.group(2).lower() if nick not in bot.nick2id: bot.reply('Sorry, I don''t know ' +", "else: bot.reply(nick + \" isn't an officer\") unofficer.commands = ['unofficer'] def announce(bot, input):", "-*- from hon.packets import ID def setup(bot): bot.config.module_config('welcome_members',[1,'Will welcome members in /c m", "from the clan\") remove.commands = ['remove'] status = { ID.HON_STATUS_OFFLINE: \"Offline\", ID.HON_STATUS_ONLINE: \"Online\",", "+ nick) else: id = bot.nick2id[nick] bot.write_packet(ID.HON_CS_CLAN_REMOVE_MEMBER,id) 
query = { 'f' : 'set_rank',", "DND command']) bot.dnd = [] def change_member(bot,origin,data): who,status,whodid = data[0],data[1],data[2] if status ==", "ID.HON_STATUS_ONLINE: avail_officers.append(bot.id2nick[ply]) if len(avail_officers) > 0: outstr = ', '.join(avail_officers) else: outstr =", "= user_upgrades(bot, nick) elif id in bot.id2nick and bot.id2nick[id] == bot.config.owner: bot.clan_status[id] =", "id in bot.id2nick: nick = bot.id2nick[id] bot.write_packet(ID.HON_CS_CLAN_MESSAGE,'Welcome, {0}!'.format(nick)) add_member.event = [ID.HON_SC_CLAN_MEMBER_ADDED] def member_changestatus(bot,origin,data):", "\"\"\"Get clan member info\"\"\" if not input.group(2): bot.reply( \"{0} - Members: {1}, Online:", "not in bot.nick2id: bot.reply('Sorry, I don''t know ' + nick) else: id =", "officers(bot, input) if not input.group(2): return nick = input.group(2).lower() if not nick in", "player['rank'] query = {'nickname' : nick} query['f'] = 'show_stats' query['table'] = 'player' data", "bot.id2nick[id] bot.write_packet(ID.HON_CS_CLAN_MESSAGE,'Welcome, {0}!'.format(nick)) add_member.event = [ID.HON_SC_CLAN_MEMBER_ADDED] def member_changestatus(bot,origin,data): id = data[0] if id", "input.group(2).lower() if nick in bot.config.officers: bot.config.set_del('officers', nick) bot.reply(\"Removed {0} from officer list\".format(nick)) else:", "= [] def change_member(bot,origin,data): who,status,whodid = data[0],data[1],data[2] if status == 0: del(bot.clan_roster[who]) elif", "input): \"\"\"Remove Officer Alt\"\"\" if not input.admin: return False if not input.group(2): return", "False nick = input.group(2).lower() if nick not in bot.nick2id: bot.reply('Sorry, I don''t know", "in bot.clan_roster or (id in bot.clan_roster and not bot.clan_roster[id]['rank'] in ['Officer', 'Leader']): if", "nick: bot.reply(\"You are now available.\") del(bot.dnd[key]) return bot.reply(\"You are now unavailable.\") bot.dnd.append(input.nick) dnd.commands", "coding: utf8 -*- from hon.packets import ID def setup(bot): bot.config.module_config('welcome_members',[1,'Will welcome members in", "nick = bot.id2nick[id] #bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick) bot.clan_status[id] = u[1] elif bot.id2nick[id] ==", "bot.reply(nick + \" is already an officer\") officer.commands = ['officer'] def unofficer(bot, input):", "= bot.masterserver_request(query,cookie=True) bot.reply(\"{0} - Rank: {1}, Last Online: {2}, Status: {3}\".format(nick, rank, data['last_activity'],", "'Remove', 'clan_id' : bot.clan_info['clan_id'] } bot.masterserver_request(query) bot.reply(nick + \" was removed from the", "return for key, nick in enumerate(bot.dnd): if input.nick == nick: bot.reply(\"You are now", "bot.config.officers): avail_officers.append(bot.id2nick[ply]) elif bot.id2nick[ply] in bot.config.officers: if ply in bot.user_status and bot.user_status[ply] is", "not in bot.nick2id: bot.reply(\"Error occurred\") return id = bot.nick2id[input.nick] if not id in", "False if not input.group(2): return bot.write_packet(ID.HON_CS_CLAN_MESSAGE, input.group(2)) announce.commands = ['announce'] def dnd(bot, input):", "== 2: bot.clan_roster[who]['rank'] = 'Officer' elif status == 3:#not sure about this one", "and bot.id2nick[id] == bot.config.owner: bot.clan_status[id] = data[1] member_changestatus.event = [ID.HON_SC_UPDATE_STATUS] def member_initstatus(bot,origin,data): for", "{2}, In-Game: {3}\" .format( bot.clan_info['name'], len(bot.clan_roster), len(sublist(bot.clan_status, ID.HON_STATUS_ONLINE)), len(sublist(bot.clan_status, 
ID.HON_STATUS_INGAME)) ) ) else:", "id in bot.clan_roster: if u[1] in [ID.HON_STATUS_ONLINE, ID.HON_STATUS_INGAME]: nick = bot.id2nick[id] #bot.clan_roster[id]['upgrades'] =", "bot.clan_roster and not bot.clan_roster[id]['rank'] in ['Officer', 'Leader']): if not input.nick in bot.config.officers and", "not nick in bot.config.officers: bot.config.set_add('officers', nick) bot.reply(\"Added {0} to officer list\".format(nick)) else: bot.reply(nick", "ID.HON_STATUS_ONLINE and (bot.clan_roster[ply]['rank'] in ['Officer', 'Leader'] or bot.id2nick[ply] in bot.config.officers): avail_officers.append(bot.id2nick[ply]) elif bot.id2nick[ply]", "def unofficer(bot, input): \"\"\"Remove Officer Alt\"\"\" if not input.admin: return False if not", "== value] def info(bot,input): \"\"\"Get clan member info\"\"\" if not input.group(2): bot.reply( \"{0}", "[[], 'Allowed to use DND command']) bot.dnd = [] def change_member(bot,origin,data): who,status,whodid =", "if data[1] in [ID.HON_STATUS_ONLINE]: \"\"\" \"\"\" #nick = bot.id2nick[id] #bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick)", "= input.group(2).lower() if nick in bot.config.officers: bot.config.set_del('officers', nick) bot.reply(\"Removed {0} from officer list\".format(nick))", "['info'] def officers(bot, input): \"\"\"Find available officers\"\"\" avail_officers = [] for ply in", "bot.account_id: continue if bot.id2nick[ply] in bot.dnd: continue if ply in bot.clan_status and ply", "in bot.config.allowdnd: return for key, nick in enumerate(bot.dnd): if input.nick == nick: bot.reply(\"You", "officer\") officer.commands = ['officer'] def unofficer(bot, input): \"\"\"Remove Officer Alt\"\"\" if not input.admin:", "an officer\") officer.commands = ['officer'] def unofficer(bot, input): \"\"\"Remove Officer Alt\"\"\" if not", ": 'Remove', 'clan_id' : bot.clan_info['clan_id'] } bot.masterserver_request(query) bot.reply(nick + \" was removed from", "member_initstatus.event = [ID.HON_SC_INITIAL_STATUS] member_initstatus.thread = False def invite(bot,input): \"\"\"invites to clan, admins only\"\"\"", "# -*- coding: utf8 -*- from hon.packets import ID def setup(bot): bot.config.module_config('welcome_members',[1,'Will welcome" ]
[ "frappe def execute(): a=frappe.new_doc(\"Task\") a.subject='axy' a.save() print(a.name) # #bench execute demo.doctype.task.execute # print('***************')", "import frappe def execute(): a=frappe.new_doc(\"Task\") a.subject='axy' a.save() print(a.name) # #bench execute demo.doctype.task.execute #" ]
[ "the `App` class. Renders to terminal. \"\"\" def __init__(self, app, env_out, default_char, default_color:", "out=reduced_color_diffs) np.logical_or(char_diffs, reduced_color_diffs, out=char_diffs) write(\"\\x1b[?25l\") # Hide cursor ys, xs = np.nonzero(char_diffs) for", "np from ..colors import Color from .widget import Widget, overlapping_region from .widget_data_structures import", "self.colors, self._last_colors = self._last_colors, self.colors # Bring arrays into locals: canvas = self.canvas", "Point(0, 0) @property def absolute_pos(self): return Point(0, 0) @property def is_transparent(self): return False", "version of `(last_canvas != canvas) | np.any(last_colors != colors, axis=-1)` # that re-uses", "arrays.) np.not_equal(self._last_canvas, canvas, out=char_diffs) np.not_equal(self._last_colors, colors, out=color_diffs) np.any(color_diffs, axis=-1, out=reduced_color_diffs) np.logical_or(char_diffs, reduced_color_diffs, out=char_diffs)", "= np.nonzero(char_diffs) for y, x, color, char in zip(ys, xs, colors[ys, xs], canvas[ys,", "dtype=np.uint8) self.canvas = np.full_like(self._last_canvas, \"><\") # \"><\" will guarantee an entire screen redraw.", "colors, axis=-1)` # that re-uses buffers instead of creating new arrays.) np.not_equal(self._last_canvas, canvas,", "xs = np.nonzero(char_diffs) for y, x, color, char in zip(ys, xs, colors[ys, xs],", "to descendants until handled. \"\"\" any(widget.dispatch_press(key_press) for widget in reversed(self.children)) def dispatch_click(self, mouse_event):", "] self.resize(env_out.get_size()) def resize(self, dim: Size): \"\"\" Resize canvas. Last render is erased.", "reduced_color_diffs = self._reduced_color_diffs env_out = self.env_out write = env_out._buffer.append # Erase canvas: canvas[:]", "child_rect) # Find differences between current render and last render: # (This is", "until handled. \"\"\" any(widget.dispatch_press(key_press) for widget in reversed(self.children)) def dispatch_click(self, mouse_event): \"\"\" Dispatch", "# The escape codes for moving the cursor and setting the color concatenated:", "False @property def is_visible(self): return True @property def parent(self): return None @property def", "# Reset attributes env_out.flush() def dispatch_press(self, key_press): \"\"\" Dispatch key press to descendants", "np.full(dim, self.default_char, dtype=object) self._last_colors = np.full((*dim, 6), self.default_color, dtype=np.uint8) self.canvas = np.full_like(self._last_canvas, \"><\")", "self.canvas = np.full_like(self._last_canvas, \"><\") # \"><\" will guarantee an entire screen redraw. 
self.colors", "xs], canvas[ys, xs]): # The escape codes for moving the cursor and setting", "in reversed(self.children)) def dispatch_click(self, mouse_event): \"\"\" Dispatch mouse event to descendents until handled.", "parent(self): return None @property def root(self): return self @property def app(self): return self._app", "and last render: # (This is optimized version of `(last_canvas != canvas) |", "return 0 @property def pos(self): return Point(0, 0) @property def absolute_pos(self): return Point(0,", "= np.full((*dim, 6), self.default_color, dtype=np.uint8) self.canvas = np.full_like(self._last_canvas, \"><\") # \"><\" will guarantee", "self.default_char, dtype=object) self._last_colors = np.full((*dim, 6), self.default_color, dtype=np.uint8) self.canvas = np.full_like(self._last_canvas, \"><\") #", "np.not_equal(self._last_canvas, canvas, out=char_diffs) np.not_equal(self._last_colors, colors, out=color_diffs) np.any(color_diffs, axis=-1, out=reduced_color_diffs) np.logical_or(char_diffs, reduced_color_diffs, out=char_diffs) write(\"\\x1b[?25l\")", "child.update_geometry() @property def top(self): return 0 @property def left(self): return 0 @property def", "canvas: canvas[:] = self.default_char colors[:, :] = self.default_color overlap = overlapping_region height, width", "def __init__(self, app, env_out, default_char, default_color: Color): self._app = app self.env_out = env_out", "last render: self.canvas, self._last_canvas = self._last_canvas, self.canvas self.colors, self._last_colors = self._last_colors, self.colors #", "= region child.render(canvas[dest_slice], colors[dest_slice], child_rect) # Find differences between current render and last", "self._last_canvas, self.canvas self.colors, self._last_colors = self._last_colors, self.colors # Bring arrays into locals: canvas", "creating new arrays.) np.not_equal(self._last_canvas, canvas, out=char_diffs) np.not_equal(self._last_colors, colors, out=color_diffs) np.any(color_diffs, axis=-1, out=reduced_color_diffs) np.logical_or(char_diffs,", "root(self): return self @property def app(self): return self._app def absolute_to_relative_coords(self, coord): return coord", ".widget_data_structures import Point, Size, Rect class _Root(Widget): \"\"\" Root widget. Meant to be", "reversed(self.children)) def dispatch_click(self, mouse_event): \"\"\" Dispatch mouse event to descendents until handled. 
\"\"\"", "child_rect = region child.render(canvas[dest_slice], colors[dest_slice], child_rect) # Find differences between current render and", "between current render and last render: # (This is optimized version of `(last_canvas", "def parent(self): return None @property def root(self): return self @property def app(self): return", "= canvas.shape rect = Rect( 0, 0, height, width, height, width, ) for", "= self._color_diffs reduced_color_diffs = self._reduced_color_diffs env_out = self.env_out write = env_out._buffer.append # Erase", "0 @property def left(self): return 0 @property def pos(self): return Point(0, 0) @property", "current render and last render: # (This is optimized version of `(last_canvas !=", "concatenated: write(\"\\x1b[{};{}H\\x1b[0;38;2;{};{};{};48;2;{};{};{}m{}\".format(y + 1, x + 1, *color, char)) write(\"\\x1b[0m\") # Reset attributes", "@property def left(self): return 0 @property def pos(self): return Point(0, 0) @property def", "escape codes for moving the cursor and setting the color concatenated: write(\"\\x1b[{};{}H\\x1b[0;38;2;{};{};{};48;2;{};{};{}m{}\".format(y +", "env_out self.default_char = default_char self.default_color = default_color self.children = [ ] self.resize(env_out.get_size()) def", "out=color_diffs) np.any(color_diffs, axis=-1, out=reduced_color_diffs) np.logical_or(char_diffs, reduced_color_diffs, out=char_diffs) write(\"\\x1b[?25l\") # Hide cursor ys, xs", "= self._reduced_color_diffs env_out = self.env_out write = env_out._buffer.append # Erase canvas: canvas[:] =", "left(self): return 0 @property def pos(self): return Point(0, 0) @property def absolute_pos(self): return", "from .widget_data_structures import Point, Size, Rect class _Root(Widget): \"\"\" Root widget. Meant to", "locals: canvas = self.canvas colors = self.colors char_diffs = self._char_diffs color_diffs = self._color_diffs", "return Point(0, 0) @property def absolute_pos(self): return Point(0, 0) @property def is_transparent(self): return", "1, *color, char)) write(\"\\x1b[0m\") # Reset attributes env_out.flush() def dispatch_press(self, key_press): \"\"\" Dispatch", "press to descendants until handled. \"\"\" any(widget.dispatch_press(key_press) for widget in reversed(self.children)) def dispatch_click(self,", "import Point, Size, Rect class _Root(Widget): \"\"\" Root widget. Meant to be instantiated", "Reset attributes env_out.flush() def dispatch_press(self, key_press): \"\"\" Dispatch key press to descendants until", "width = canvas.shape rect = Rect( 0, 0, height, width, height, width, )", "\"\"\" Dispatch key press to descendants until handled. \"\"\" any(widget.dispatch_press(key_press) for widget in", "= self.colors char_diffs = self._char_diffs color_diffs = self._color_diffs reduced_color_diffs = self._reduced_color_diffs env_out =", "np.zeros_like(self.colors, dtype=np.bool8) self._reduced_color_diffs = np.zeros_like(self.canvas, dtype=np.bool8) for child in self.children: child.update_geometry() @property def", "setting the color concatenated: write(\"\\x1b[{};{}H\\x1b[0;38;2;{};{};{};48;2;{};{};{}m{}\".format(y + 1, x + 1, *color, char)) write(\"\\x1b[0m\")", "into locals: canvas = self.canvas colors = self.colors char_diffs = self._char_diffs color_diffs =", "np.any(color_diffs, axis=-1, out=reduced_color_diffs) np.logical_or(char_diffs, reduced_color_diffs, out=char_diffs) write(\"\\x1b[?25l\") # Hide cursor ys, xs =", "dispatch_click(self, mouse_event): \"\"\" Dispatch mouse event to descendents until handled. 
\"\"\" any(widget.dispatch_click(mouse_event) for", "canvas. Last render is erased. \"\"\" self.env_out.erase_screen() self.env_out.flush() self._dim = dim self._last_canvas =", "self.default_color = default_color self.children = [ ] self.resize(env_out.get_size()) def resize(self, dim: Size): \"\"\"", "char)) write(\"\\x1b[0m\") # Reset attributes env_out.flush() def dispatch_press(self, key_press): \"\"\" Dispatch key press", "moving the cursor and setting the color concatenated: write(\"\\x1b[{};{}H\\x1b[0;38;2;{};{};{};48;2;{};{};{}m{}\".format(y + 1, x +", "= self._last_colors.copy() # Buffer arrays to re-use in the `render` method: self._char_diffs =", "re-use in the `render` method: self._char_diffs = np.zeros_like(self.canvas, dtype=np.bool8) self._color_diffs = np.zeros_like(self.colors, dtype=np.bool8)", "to be instantiated by the `App` class. Renders to terminal. \"\"\" def __init__(self,", "= overlapping_region height, width = canvas.shape rect = Rect( 0, 0, height, width,", "new arrays.) np.not_equal(self._last_canvas, canvas, out=char_diffs) np.not_equal(self._last_colors, colors, out=color_diffs) np.any(color_diffs, axis=-1, out=reduced_color_diffs) np.logical_or(char_diffs, reduced_color_diffs,", "import numpy as np from ..colors import Color from .widget import Widget, overlapping_region", "widget in reversed(self.children)) def dispatch_click(self, mouse_event): \"\"\" Dispatch mouse event to descendents until", "the color concatenated: write(\"\\x1b[{};{}H\\x1b[0;38;2;{};{};{};48;2;{};{};{}m{}\".format(y + 1, x + 1, *color, char)) write(\"\\x1b[0m\") #", "codes for moving the cursor and setting the color concatenated: write(\"\\x1b[{};{}H\\x1b[0;38;2;{};{};{};48;2;{};{};{}m{}\".format(y + 1,", "`render` method: self._char_diffs = np.zeros_like(self.canvas, dtype=np.bool8) self._color_diffs = np.zeros_like(self.colors, dtype=np.bool8) self._reduced_color_diffs = np.zeros_like(self.canvas,", "to terminal. \"\"\" def __init__(self, app, env_out, default_char, default_color: Color): self._app = app", "terminal. \"\"\" # Swap canvas with last render: self.canvas, self._last_canvas = self._last_canvas, self.canvas", "app(self): return self._app def absolute_to_relative_coords(self, coord): return coord def render(self): \"\"\" Paint canvas.", "= env_out._buffer.append # Erase canvas: canvas[:] = self.default_char colors[:, :] = self.default_color overlap", "True @property def parent(self): return None @property def root(self): return self @property def", "self.env_out.flush() self._dim = dim self._last_canvas = np.full(dim, self.default_char, dtype=object) self._last_colors = np.full((*dim, 6),", "@property def top(self): return 0 @property def left(self): return 0 @property def pos(self):", "for child in self.children: if region := overlap(rect, child): dest_slice, child_rect = region", "buffers instead of creating new arrays.) 
import numpy as np

from ..colors import Color
from .widget import Widget, overlapping_region
from .widget_data_structures import Point, Size, Rect


class _Root(Widget):
    """
    Root widget. Meant to be instantiated by the `App` class. Renders to terminal.
    """
    def __init__(self, app, env_out, default_char, default_color: Color):
        self._app = app
        self.env_out = env_out
        self.default_char = default_char
        self.default_color = default_color

        self.children = [ ]

        self.resize(env_out.get_size())

    def resize(self, dim: Size):
        """
        Resize canvas. Last render is erased.
        """
        self.env_out.erase_screen()
        self.env_out.flush()

        self._dim = dim

        self._last_canvas = np.full(dim, self.default_char, dtype=object)
        self._last_colors = np.full((*dim, 6), self.default_color, dtype=np.uint8)

        self.canvas = np.full_like(self._last_canvas, "><")  # "><" will guarantee an entire screen redraw.
        self.colors = self._last_colors.copy()

        # Buffer arrays to re-use in the `render` method:
        self._char_diffs = np.zeros_like(self.canvas, dtype=np.bool8)
        self._color_diffs = np.zeros_like(self.colors, dtype=np.bool8)
        self._reduced_color_diffs = np.zeros_like(self.canvas, dtype=np.bool8)

        for child in self.children:
            child.update_geometry()

    @property
    def top(self):
        return 0

    @property
    def left(self):
        return 0

    @property
    def pos(self):
        return Point(0, 0)

    @property
    def absolute_pos(self):
        return Point(0, 0)

    @property
    def is_transparent(self):
        return False

    @property
    def is_visible(self):
        return True

    @property
    def parent(self):
        return None

    @property
    def root(self):
        return self

    @property
    def app(self):
        return self._app

    def absolute_to_relative_coords(self, coord):
        return coord

    def render(self):
        """
        Paint canvas. Render to terminal.
        """
        # Swap canvas with last render:
        self.canvas, self._last_canvas = self._last_canvas, self.canvas
        self.colors, self._last_colors = self._last_colors, self.colors

        # Bring arrays into locals:
        canvas = self.canvas
        colors = self.colors

        char_diffs = self._char_diffs
        color_diffs = self._color_diffs
        reduced_color_diffs = self._reduced_color_diffs

        env_out = self.env_out
        write = env_out._buffer.append

        # Erase canvas:
        canvas[:] = self.default_char
        colors[:, :] = self.default_color

        overlap = overlapping_region
        height, width = canvas.shape
        rect = Rect(0, 0, height, width, height, width)

        for child in self.children:
            if region := overlap(rect, child):
                dest_slice, child_rect = region
                child.render(canvas[dest_slice], colors[dest_slice], child_rect)

        # Find differences between current render and last render:
        # (This is an optimized version of `(last_canvas != canvas) | np.any(last_colors != colors, axis=-1)`
        # that re-uses buffers instead of creating new arrays.)
        np.not_equal(self._last_canvas, canvas, out=char_diffs)
        np.not_equal(self._last_colors, colors, out=color_diffs)
        np.any(color_diffs, axis=-1, out=reduced_color_diffs)
        np.logical_or(char_diffs, reduced_color_diffs, out=char_diffs)

        write("\x1b[?25l")  # Hide cursor

        ys, xs = np.nonzero(char_diffs)
        for y, x, color, char in zip(ys, xs, colors[ys, xs], canvas[ys, xs]):
            # The escape codes for moving the cursor and setting the color concatenated:
            write("\x1b[{};{}H\x1b[0;38;2;{};{};{};48;2;{};{};{}m{}".format(y + 1, x + 1, *color, char))

        write("\x1b[0m")  # Reset attributes
        env_out.flush()

    def dispatch_press(self, key_press):
        """
        Dispatch key press to descendants until handled.
        """
        any(widget.dispatch_press(key_press) for widget in reversed(self.children))

    def dispatch_click(self, mouse_event):
        """
        Dispatch mouse event to descendants until handled.
        """
        any(widget.dispatch_click(mouse_event) for widget in reversed(self.children))
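# ---------------------------------------------------------------------------
# A minimal standalone sketch of the buffered diff used in `render` above,
# assuming a toy 2x2 canvas. It checks that the `out=`-buffered ufunc calls
# flag the same cells as the naive expression
# `(last_canvas != canvas) | np.any(last_colors != colors, axis=-1)`
# from the comment, while allocating no temporary arrays per frame.
import numpy as np

last_canvas = np.array([["a", "b"], ["c", "d"]], dtype=object)
canvas = np.array([["a", "x"], ["c", "d"]], dtype=object)  # one char changed
last_colors = np.zeros((2, 2, 6), dtype=np.uint8)
colors = last_colors.copy()
colors[1, 0] = 255  # one cell's color changed

# Pre-allocated buffers, re-usable every frame (the class above spells the
# dtype `np.bool8`, an alias of plain `bool` in older NumPy):
char_diffs = np.zeros((2, 2), dtype=bool)
color_diffs = np.zeros((2, 2, 6), dtype=bool)
reduced_color_diffs = np.zeros((2, 2), dtype=bool)

np.not_equal(last_canvas, canvas, out=char_diffs)
np.not_equal(last_colors, colors, out=color_diffs)
np.any(color_diffs, axis=-1, out=reduced_color_diffs)
np.logical_or(char_diffs, reduced_color_diffs, out=char_diffs)

naive = (last_canvas != canvas) | np.any(last_colors != colors, axis=-1)
assert (char_diffs == naive).all()  # cells (0, 1) and (1, 0) need redrawing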
[ "page \") print (\"Page \" + url + \" may not exist\") browser.close()", "\"\\n\" \"\\\"headline\\\": \") + str(article_headline_list[j]) + \"\\n\\\"author\\\": \\\"\" + str(writer_list[j]) + \"\\\"\\n\\\"date\\\": \"", "List Variables outputList = [] article_headline_list = [] writer_list = [] time_posted_list =", "Variables totalRecords=0 totalRecordsOut=0 page=-1 timestamp=0 browser = mechanicalsoup.StatefulBrowser() On_This_Page = False logged_in =", "\"%A %B %d %Y %I %M%p\") return (t2-datetime(1970,1,1)).total_seconds() except Exception: print (\"Error while", "writer = writer.replace(' ', '') writer = writer.replace('Posted','') writer = writer.replace('by','') writer_list.append(writer) #", "= [] # Count Variables totalRecords=0 totalRecordsOut=0 page=-1 timestamp=0 browser = mechanicalsoup.StatefulBrowser() On_This_Page", "per format required for j in range(totalRecords): if (int(time_posted_list[j]) < timestamp): On_This_Page =", "+ \"\\n\\\"author\\\": \\\"\" + str(writer_list[j]) + \"\\\"\\n\\\"date\\\": \" + str(int(time_posted_list[j])) + \"\\n},\\n\" )", "a valid number\" browser.select_form(nr=1) browser['unickname'] = nick browser['upasswd'] = passw result = browser.submit_selected()", "= browser.submit_selected() response = result.content soup_0 = BeautifulSoup(response, \"lxml\") user = str(soup_0.find_all(class_=\"user-access\")) if", "time_posted: time_posted_list.append(convertTime(t.get(\"datetime\"))) for val in poster: writer = val.find(text=True) writer = \" \".join(re.split(\"\\s+\",", "re import time from datetime import datetime import urllib import mechanicalsoup import getpass", "str(int(time_posted_list[j])) + \"\\n},\\n\" ) totalRecordsOut+=1; # All records on page within timeframe, open", "url = str('https://slashdot.org/?page=') + str(page+1) # Display this message while loading other pages", "seconds\") return 1 url = 'https://slashdot.org/' # List Variables outputList = [] article_headline_list", "date found while not(On_This_Page): page+=1 try: browser.open(url) except Exception: print (\"Error cannot open", "required for j in range(totalRecords): if (int(time_posted_list[j]) < timestamp): On_This_Page = True break", "article_headline_list.append(title) #Get Text headline totalRecords+=1 for t in time_posted: time_posted_list.append(convertTime(t.get(\"datetime\"))) for val in", "result = browser.submit_selected() response = result.content soup_0 = BeautifulSoup(response, \"lxml\") user = str(soup_0.find_all(class_=\"user-access\"))", "browser.select_form(nr=1) browser['unickname'] = nick browser['upasswd'] = passw result = browser.submit_selected() response = result.content", "else: print (\"Try Again\\n\") time.sleep(5) # Loop until date found while not(On_This_Page): page+=1", "outputList.append(str( \"{\" \"\\n\" \"\\\"headline\\\": \") + str(article_headline_list[j]) + \"\\n\\\"author\\\": \\\"\" + str(writer_list[j]) +", "break #release resources # HTML to BeautifulSoup response = \"\" response=result.content soup =", "< timestamp): On_This_Page = True break else: outputList.append(str( \"{\" \"\\n\" \"\\\"headline\\\": \") +", "= [] writer_list = [] time_posted_list = [] response = [] # Count", "page+=1 try: browser.open(url) except Exception: print (\"Error cannot open next page \") print", "article_headline = soup.find_all('span',class_=\"story-title\") poster = soup.find_all('span', class_=\"story-byline\") time_posted = soup.find_all('time') # Store all", "\")) # 1535241600 except Exception: \"Not a valid number\" browser.select_form(nr=1) 
browser['unickname'] = nick", "%d %Y %I %M%p\") return (t2-datetime(1970,1,1)).total_seconds() except Exception: print (\"Error while converting time", "user.find(nick)>0: logged_in=True print (\"Logged in\") else: print (\"Try Again\\n\") time.sleep(5) # Loop until", "until logged in browser.open(url) while not logged_in: nick = input(\"Enter your nickname for", "Again\\n\") time.sleep(5) # Loop until date found while not(On_This_Page): page+=1 try: browser.open(url) except", "Display this message while loading other pages print (\"Opening next page \" +", "timestamp = int(input(\"Enter timestamp in seconds since 1970: \")) # 1535241600 except Exception:", "= soup.find_all('time') # Store all required info for headline in article_headline: title =", "= writer.replace('Posted','') writer = writer.replace('by','') writer_list.append(writer) # Make output List as per format", "totalRecordsOut%totalRecords == 0: totalRecordsOut=0 url = str('https://slashdot.org/?page=') + str(page+1) # Display this message", "else: outputList.append(str( \"{\" \"\\n\" \"\\\"headline\\\": \") + str(article_headline_list[j]) + \"\\n\\\"author\\\": \\\"\" + str(writer_list[j])", "time_posted = soup.find_all('time') # Store all required info for headline in article_headline: title", "\" + str(int(time_posted_list[j])) + \"\\n},\\n\" ) totalRecordsOut+=1; # All records on page within", "converting time to seconds\") return 1 url = 'https://slashdot.org/' # List Variables outputList", "datetime import datetime import urllib import mechanicalsoup import getpass from bs4 import BeautifulSoup", "1535241600 except Exception: \"Not a valid number\" browser.select_form(nr=1) browser['unickname'] = nick browser['upasswd'] =", "= passw result = browser.submit_selected() response = result.content soup_0 = BeautifulSoup(response, \"lxml\") user", "mechanicalsoup import getpass from bs4 import BeautifulSoup # Conver Time Function def convertTime(time):", "[] time_posted_list = [] response = [] # Count Variables totalRecords=0 totalRecordsOut=0 page=-1", "print (\"Page \" + url + \" may not exist\") browser.close() break #release", "+ \"\\\"\\n\\\"date\\\": \" + str(int(time_posted_list[j])) + \"\\n},\\n\" ) totalRecordsOut+=1; # All records on", "return 1 url = 'https://slashdot.org/' # List Variables outputList = [] article_headline_list =", "article_headline: title = '\\\"'+headline.a.get_text()+'\\\"' article_headline_list.append(title) #Get Text headline totalRecords+=1 for t in time_posted:", "%Y %I %M%p\") return (t2-datetime(1970,1,1)).total_seconds() except Exception: print (\"Error while converting time to", "page if totalRecordsOut%totalRecords == 0: totalRecordsOut=0 url = str('https://slashdot.org/?page=') + str(page+1) # Display", "all Headlines article_headline = soup.find_all('span',class_=\"story-title\") poster = soup.find_all('span', class_=\"story-byline\") time_posted = soup.find_all('time') #", "== 0: totalRecordsOut=0 url = str('https://slashdot.org/?page=') + str(page+1) # Display this message while", "time = time.replace(\",\",\"\").replace(\"@\",\"\").replace(\".\",\" \").replace(\":\",\" \") t2 = datetime.strptime(time[3:], \"%A %B %d %Y %I", "not exist\") browser.close() break #release resources # HTML to BeautifulSoup response = \"\"", "\") #<PASSWORD> while(timestamp<1): try: timestamp = int(input(\"Enter timestamp in seconds since 1970: \"))", "headline totalRecords+=1 for t in time_posted: time_posted_list.append(convertTime(t.get(\"datetime\"))) for val in poster: writer =", "= [] 
article_headline_list = [] writer_list = [] time_posted_list = [] response =", "(\"Page \" + url + \" may not exist\") browser.close() break #release resources", "mechanicalsoup.StatefulBrowser() On_This_Page = False logged_in = False # Loop until logged in browser.open(url)", "= BeautifulSoup(response, \"lxml\") user = str(soup_0.find_all(class_=\"user-access\")) if user.find(nick)>0: logged_in=True print (\"Logged in\") else:", "article_headline_list = [] writer_list = [] time_posted_list = [] response = [] #", "str(page+1) # Display this message while loading other pages print (\"Opening next page", "+ str(writer_list[j]) + \"\\\"\\n\\\"date\\\": \" + str(int(time_posted_list[j])) + \"\\n},\\n\" ) totalRecordsOut+=1; # All", "response = \"\" response=result.content soup = \"\" soup = BeautifulSoup(response, \"lxml\") # Find", "your password: \") #<PASSWORD> while(timestamp<1): try: timestamp = int(input(\"Enter timestamp in seconds since", "val.find(text=True) writer = \" \".join(re.split(\"\\s+\", writer, flags=re.UNICODE)) writer = writer.replace(' ', '') writer", "writer = writer.replace('Posted','') writer = writer.replace('by','') writer_list.append(writer) # Make output List as per", "input(\"Enter your nickname for slashdot.org: \") #Chazzio1 passw = getpass.getpass(\"Enter your password: \")", "poster: writer = val.find(text=True) writer = \" \".join(re.split(\"\\s+\", writer, flags=re.UNICODE)) writer = writer.replace('", "= str('https://slashdot.org/?page=') + str(page+1) # Display this message while loading other pages print", "not(On_This_Page): page+=1 try: browser.open(url) except Exception: print (\"Error cannot open next page \")", "range(totalRecords): if (int(time_posted_list[j]) < timestamp): On_This_Page = True break else: outputList.append(str( \"{\" \"\\n\"", "(\"Try Again\\n\") time.sleep(5) # Loop until date found while not(On_This_Page): page+=1 try: browser.open(url)", "timestamp in seconds since 1970: \")) # 1535241600 except Exception: \"Not a valid", "slashdot.org: \") #Chazzio1 passw = getpass.getpass(\"Enter your password: \") #<PASSWORD> while(timestamp<1): try: timestamp", "browser['unickname'] = nick browser['upasswd'] = passw result = browser.submit_selected() response = result.content soup_0", "import datetime import urllib import mechanicalsoup import getpass from bs4 import BeautifulSoup #", "HTML to BeautifulSoup response = \"\" response=result.content soup = \"\" soup = BeautifulSoup(response,", "\") + str(article_headline_list[j]) + \"\\n\\\"author\\\": \\\"\" + str(writer_list[j]) + \"\\\"\\n\\\"date\\\": \" + str(int(time_posted_list[j]))", "nickname for slashdot.org: \") #Chazzio1 passw = getpass.getpass(\"Enter your password: \") #<PASSWORD> while(timestamp<1):", "\") #Chazzio1 passw = getpass.getpass(\"Enter your password: \") #<PASSWORD> while(timestamp<1): try: timestamp =", "nick = input(\"Enter your nickname for slashdot.org: \") #Chazzio1 passw = getpass.getpass(\"Enter your", "= soup.find_all('span', class_=\"story-byline\") time_posted = soup.find_all('time') # Store all required info for headline", "outputList = [] article_headline_list = [] writer_list = [] time_posted_list = [] response", "\"Not a valid number\" browser.select_form(nr=1) browser['unickname'] = nick browser['upasswd'] = passw result =", "browser.close() break #release resources # HTML to BeautifulSoup response = \"\" response=result.content soup", "as per format required for j in range(totalRecords): if (int(time_posted_list[j]) < timestamp): On_This_Page", "writer 
= writer.replace('by','') writer_list.append(writer) # Make output List as per format required for", "timestamp): On_This_Page = True break else: outputList.append(str( \"{\" \"\\n\" \"\\\"headline\\\": \") + str(article_headline_list[j])", "time.replace(\",\",\"\").replace(\"@\",\"\").replace(\".\",\" \").replace(\":\",\" \") t2 = datetime.strptime(time[3:], \"%A %B %d %Y %I %M%p\") return", "# Find all Headlines article_headline = soup.find_all('span',class_=\"story-title\") poster = soup.find_all('span', class_=\"story-byline\") time_posted =", "getpass.getpass(\"Enter your password: \") #<PASSWORD> while(timestamp<1): try: timestamp = int(input(\"Enter timestamp in seconds", "Text headline totalRecords+=1 for t in time_posted: time_posted_list.append(convertTime(t.get(\"datetime\"))) for val in poster: writer", "(t2-datetime(1970,1,1)).total_seconds() except Exception: print (\"Error while converting time to seconds\") return 1 url", "nick browser['upasswd'] = passw result = browser.submit_selected() response = result.content soup_0 = BeautifulSoup(response,", "writer = \" \".join(re.split(\"\\s+\", writer, flags=re.UNICODE)) writer = writer.replace(' ', '') writer =", "try: browser.open(url) except Exception: print (\"Error cannot open next page \") print (\"Page", "writer = val.find(text=True) writer = \" \".join(re.split(\"\\s+\", writer, flags=re.UNICODE)) writer = writer.replace(' ',", "= writer.replace('by','') writer_list.append(writer) # Make output List as per format required for j", "time_posted_list.append(convertTime(t.get(\"datetime\"))) for val in poster: writer = val.find(text=True) writer = \" \".join(re.split(\"\\s+\", writer,", "= [] response = [] # Count Variables totalRecords=0 totalRecordsOut=0 page=-1 timestamp=0 browser", "bs4 import BeautifulSoup # Conver Time Function def convertTime(time): try: time = time.replace(\",\",\"\").replace(\"@\",\"\").replace(\".\",\"", "password: \") #<PASSWORD> while(timestamp<1): try: timestamp = int(input(\"Enter timestamp in seconds since 1970:", "in poster: writer = val.find(text=True) writer = \" \".join(re.split(\"\\s+\", writer, flags=re.UNICODE)) writer =", "to BeautifulSoup response = \"\" response=result.content soup = \"\" soup = BeautifulSoup(response, \"lxml\")", "= False logged_in = False # Loop until logged in browser.open(url) while not", "Loop until logged in browser.open(url) while not logged_in: nick = input(\"Enter your nickname", "print (\"Logged in\") else: print (\"Try Again\\n\") time.sleep(5) # Loop until date found", "str(writer_list[j]) + \"\\\"\\n\\\"date\\\": \" + str(int(time_posted_list[j])) + \"\\n},\\n\" ) totalRecordsOut+=1; # All records", "for t in time_posted: time_posted_list.append(convertTime(t.get(\"datetime\"))) for val in poster: writer = val.find(text=True) writer", "#release resources # HTML to BeautifulSoup response = \"\" response=result.content soup = \"\"", "for j in range(totalRecords): if (int(time_posted_list[j]) < timestamp): On_This_Page = True break else:", "logged_in = False # Loop until logged in browser.open(url) while not logged_in: nick", "Find all Headlines article_headline = soup.find_all('span',class_=\"story-title\") poster = soup.find_all('span', class_=\"story-byline\") time_posted = soup.find_all('time')", "result.content soup_0 = BeautifulSoup(response, \"lxml\") user = str(soup_0.find_all(class_=\"user-access\")) if user.find(nick)>0: logged_in=True print (\"Logged", "# Store all required info for headline in article_headline: title = 
'\\\"'+headline.a.get_text()+'\\\"' article_headline_list.append(title)", "= 'https://slashdot.org/' # List Variables outputList = [] article_headline_list = [] writer_list =", "\"lxml\") # Find all Headlines article_headline = soup.find_all('span',class_=\"story-title\") poster = soup.find_all('span', class_=\"story-byline\") time_posted", "next page if totalRecordsOut%totalRecords == 0: totalRecordsOut=0 url = str('https://slashdot.org/?page=') + str(page+1) #", "import getpass from bs4 import BeautifulSoup # Conver Time Function def convertTime(time): try:", "page \" + url) for headline in outputList: print (headline) print (\"Total headlines", "\") print (\"Page \" + url + \" may not exist\") browser.close() break", "All records on page within timeframe, open next page if totalRecordsOut%totalRecords == 0:", "logged_in=True print (\"Logged in\") else: print (\"Try Again\\n\") time.sleep(5) # Loop until date", "print (\"Try Again\\n\") time.sleep(5) # Loop until date found while not(On_This_Page): page+=1 try:", "required info for headline in article_headline: title = '\\\"'+headline.a.get_text()+'\\\"' article_headline_list.append(title) #Get Text headline", "writer, flags=re.UNICODE)) writer = writer.replace(' ', '') writer = writer.replace('Posted','') writer = writer.replace('by','')", "message while loading other pages print (\"Opening next page \" + url) for", "soup_0 = BeautifulSoup(response, \"lxml\") user = str(soup_0.find_all(class_=\"user-access\")) if user.find(nick)>0: logged_in=True print (\"Logged in\")", "(int(time_posted_list[j]) < timestamp): On_This_Page = True break else: outputList.append(str( \"{\" \"\\n\" \"\\\"headline\\\": \")", "from bs4 import BeautifulSoup # Conver Time Function def convertTime(time): try: time =", "#Get Text headline totalRecords+=1 for t in time_posted: time_posted_list.append(convertTime(t.get(\"datetime\"))) for val in poster:", "= int(input(\"Enter timestamp in seconds since 1970: \")) # 1535241600 except Exception: \"Not", "pages print (\"Opening next page \" + url) for headline in outputList: print", "Loop until date found while not(On_This_Page): page+=1 try: browser.open(url) except Exception: print (\"Error", "Conver Time Function def convertTime(time): try: time = time.replace(\",\",\"\").replace(\"@\",\"\").replace(\".\",\" \").replace(\":\",\" \") t2 =", "browser['upasswd'] = passw result = browser.submit_selected() response = result.content soup_0 = BeautifulSoup(response, \"lxml\")", "time_posted_list = [] response = [] # Count Variables totalRecords=0 totalRecordsOut=0 page=-1 timestamp=0", "# Display this message while loading other pages print (\"Opening next page \"", "'https://slashdot.org/' # List Variables outputList = [] article_headline_list = [] writer_list = []", "time.sleep(5) # Loop until date found while not(On_This_Page): page+=1 try: browser.open(url) except Exception:", "totalRecords+=1 for t in time_posted: time_posted_list.append(convertTime(t.get(\"datetime\"))) for val in poster: writer = val.find(text=True)", "List as per format required for j in range(totalRecords): if (int(time_posted_list[j]) < timestamp):", "except Exception: print (\"Error while converting time to seconds\") return 1 url =", "\".join(re.split(\"\\s+\", writer, flags=re.UNICODE)) writer = writer.replace(' ', '') writer = writer.replace('Posted','') writer =", "= time.replace(\",\",\"\").replace(\"@\",\"\").replace(\".\",\" \").replace(\":\",\" \") t2 = datetime.strptime(time[3:], \"%A %B %d %Y %I %M%p\")", "\"\" soup = 
BeautifulSoup(response, \"lxml\") # Find all Headlines article_headline = soup.find_all('span',class_=\"story-title\") poster", "writer_list.append(writer) # Make output List as per format required for j in range(totalRecords):", "your nickname for slashdot.org: \") #Chazzio1 passw = getpass.getpass(\"Enter your password: \") #<PASSWORD>", "\" + url + \" may not exist\") browser.close() break #release resources #", "exist\") browser.close() break #release resources # HTML to BeautifulSoup response = \"\" response=result.content", "title = '\\\"'+headline.a.get_text()+'\\\"' article_headline_list.append(title) #Get Text headline totalRecords+=1 for t in time_posted: time_posted_list.append(convertTime(t.get(\"datetime\")))", "url + \" may not exist\") browser.close() break #release resources # HTML to", "Time Function def convertTime(time): try: time = time.replace(\",\",\"\").replace(\"@\",\"\").replace(\".\",\" \").replace(\":\",\" \") t2 = datetime.strptime(time[3:],", "browser.open(url) while not logged_in: nick = input(\"Enter your nickname for slashdot.org: \") #Chazzio1", "str(article_headline_list[j]) + \"\\n\\\"author\\\": \\\"\" + str(writer_list[j]) + \"\\\"\\n\\\"date\\\": \" + str(int(time_posted_list[j])) + \"\\n},\\n\"", "= \"\" soup = BeautifulSoup(response, \"lxml\") # Find all Headlines article_headline = soup.find_all('span',class_=\"story-title\")", "str(soup_0.find_all(class_=\"user-access\")) if user.find(nick)>0: logged_in=True print (\"Logged in\") else: print (\"Try Again\\n\") time.sleep(5) #", "# Count Variables totalRecords=0 totalRecordsOut=0 page=-1 timestamp=0 browser = mechanicalsoup.StatefulBrowser() On_This_Page = False", "while not logged_in: nick = input(\"Enter your nickname for slashdot.org: \") #Chazzio1 passw", "soup = \"\" soup = BeautifulSoup(response, \"lxml\") # Find all Headlines article_headline =", "on page within timeframe, open next page if totalRecordsOut%totalRecords == 0: totalRecordsOut=0 url", "to seconds\") return 1 url = 'https://slashdot.org/' # List Variables outputList = []", "= nick browser['upasswd'] = passw result = browser.submit_selected() response = result.content soup_0 =", "soup.find_all('span',class_=\"story-title\") poster = soup.find_all('span', class_=\"story-byline\") time_posted = soup.find_all('time') # Store all required info", "+ \"\\n},\\n\" ) totalRecordsOut+=1; # All records on page within timeframe, open next", "time from datetime import datetime import urllib import mechanicalsoup import getpass from bs4", "%I %M%p\") return (t2-datetime(1970,1,1)).total_seconds() except Exception: print (\"Error while converting time to seconds\")", "info for headline in article_headline: title = '\\\"'+headline.a.get_text()+'\\\"' article_headline_list.append(title) #Get Text headline totalRecords+=1", "False # Loop until logged in browser.open(url) while not logged_in: nick = input(\"Enter", "Exception: print (\"Error while converting time to seconds\") return 1 url = 'https://slashdot.org/'", "in browser.open(url) while not logged_in: nick = input(\"Enter your nickname for slashdot.org: \")", "True break else: outputList.append(str( \"{\" \"\\n\" \"\\\"headline\\\": \") + str(article_headline_list[j]) + \"\\n\\\"author\\\": \\\"\"", "+ str(int(time_posted_list[j])) + \"\\n},\\n\" ) totalRecordsOut+=1; # All records on page within timeframe,", "\"lxml\") user = str(soup_0.find_all(class_=\"user-access\")) if user.find(nick)>0: logged_in=True print (\"Logged in\") else: print (\"Try", "Headlines article_headline = 
soup.find_all('span',class_=\"story-title\") poster = soup.find_all('span', class_=\"story-byline\") time_posted = soup.find_all('time') # Store", "response = [] # Count Variables totalRecords=0 totalRecordsOut=0 page=-1 timestamp=0 browser = mechanicalsoup.StatefulBrowser()", "+ str(article_headline_list[j]) + \"\\n\\\"author\\\": \\\"\" + str(writer_list[j]) + \"\\\"\\n\\\"date\\\": \" + str(int(time_posted_list[j])) +", "1 url = 'https://slashdot.org/' # List Variables outputList = [] article_headline_list = []", "while converting time to seconds\") return 1 url = 'https://slashdot.org/' # List Variables", "try: time = time.replace(\",\",\"\").replace(\"@\",\"\").replace(\".\",\" \").replace(\":\",\" \") t2 = datetime.strptime(time[3:], \"%A %B %d %Y", "soup.find_all('span', class_=\"story-byline\") time_posted = soup.find_all('time') # Store all required info for headline in", "writer.replace(' ', '') writer = writer.replace('Posted','') writer = writer.replace('by','') writer_list.append(writer) # Make output", "False logged_in = False # Loop until logged in browser.open(url) while not logged_in:", "import mechanicalsoup import getpass from bs4 import BeautifulSoup # Conver Time Function def", "\" may not exist\") browser.close() break #release resources # HTML to BeautifulSoup response", "datetime import urllib import mechanicalsoup import getpass from bs4 import BeautifulSoup # Conver", "Function def convertTime(time): try: time = time.replace(\",\",\"\").replace(\"@\",\"\").replace(\".\",\" \").replace(\":\",\" \") t2 = datetime.strptime(time[3:], \"%A", "(\"Error while converting time to seconds\") return 1 url = 'https://slashdot.org/' # List", "cannot open next page \") print (\"Page \" + url + \" may", "\"\\\"\\n\\\"date\\\": \" + str(int(time_posted_list[j])) + \"\\n},\\n\" ) totalRecordsOut+=1; # All records on page", "user = str(soup_0.find_all(class_=\"user-access\")) if user.find(nick)>0: logged_in=True print (\"Logged in\") else: print (\"Try Again\\n\")", "\" + url) for headline in outputList: print (headline) print (\"Total headlines returned:", "while(timestamp<1): try: timestamp = int(input(\"Enter timestamp in seconds since 1970: \")) # 1535241600", "try: timestamp = int(input(\"Enter timestamp in seconds since 1970: \")) # 1535241600 except", "since 1970: \")) # 1535241600 except Exception: \"Not a valid number\" browser.select_form(nr=1) browser['unickname']", "next page \" + url) for headline in outputList: print (headline) print (\"Total", "# Loop until logged in browser.open(url) while not logged_in: nick = input(\"Enter your", "page=-1 timestamp=0 browser = mechanicalsoup.StatefulBrowser() On_This_Page = False logged_in = False # Loop", "timeframe, open next page if totalRecordsOut%totalRecords == 0: totalRecordsOut=0 url = str('https://slashdot.org/?page=') +", "headline in article_headline: title = '\\\"'+headline.a.get_text()+'\\\"' article_headline_list.append(title) #Get Text headline totalRecords+=1 for t", "logged in browser.open(url) while not logged_in: nick = input(\"Enter your nickname for slashdot.org:", "in\") else: print (\"Try Again\\n\") time.sleep(5) # Loop until date found while not(On_This_Page):", "# 1535241600 except Exception: \"Not a valid number\" browser.select_form(nr=1) browser['unickname'] = nick browser['upasswd']", "1970: \")) # 1535241600 except Exception: \"Not a valid number\" browser.select_form(nr=1) browser['unickname'] =", "'') writer = writer.replace('Posted','') writer = writer.replace('by','') writer_list.append(writer) # 
Make output List as", "= writer.replace(' ', '') writer = writer.replace('Posted','') writer = writer.replace('by','') writer_list.append(writer) # Make", "On_This_Page = True break else: outputList.append(str( \"{\" \"\\n\" \"\\\"headline\\\": \") + str(article_headline_list[j]) +", "Store all required info for headline in article_headline: title = '\\\"'+headline.a.get_text()+'\\\"' article_headline_list.append(title) #Get", "open next page \") print (\"Page \" + url + \" may not", "resources # HTML to BeautifulSoup response = \"\" response=result.content soup = \"\" soup", "# Loop until date found while not(On_This_Page): page+=1 try: browser.open(url) except Exception: print", "val in poster: writer = val.find(text=True) writer = \" \".join(re.split(\"\\s+\", writer, flags=re.UNICODE)) writer", "url) for headline in outputList: print (headline) print (\"Total headlines returned: \" +", "= '\\\"'+headline.a.get_text()+'\\\"' article_headline_list.append(title) #Get Text headline totalRecords+=1 for t in time_posted: time_posted_list.append(convertTime(t.get(\"datetime\"))) for", "', '') writer = writer.replace('Posted','') writer = writer.replace('by','') writer_list.append(writer) # Make output List", "return (t2-datetime(1970,1,1)).total_seconds() except Exception: print (\"Error while converting time to seconds\") return 1", "totalRecords=0 totalRecordsOut=0 page=-1 timestamp=0 browser = mechanicalsoup.StatefulBrowser() On_This_Page = False logged_in = False", "passw = getpass.getpass(\"Enter your password: \") #<PASSWORD> while(timestamp<1): try: timestamp = int(input(\"Enter timestamp", "Make output List as per format required for j in range(totalRecords): if (int(time_posted_list[j])", "print (\"Opening next page \" + url) for headline in outputList: print (headline)", "(\"Logged in\") else: print (\"Try Again\\n\") time.sleep(5) # Loop until date found while", "= str(soup_0.find_all(class_=\"user-access\")) if user.find(nick)>0: logged_in=True print (\"Logged in\") else: print (\"Try Again\\n\") time.sleep(5)", "browser.open(url) except Exception: print (\"Error cannot open next page \") print (\"Page \"", "= mechanicalsoup.StatefulBrowser() On_This_Page = False logged_in = False # Loop until logged in", "= datetime.strptime(time[3:], \"%A %B %d %Y %I %M%p\") return (t2-datetime(1970,1,1)).total_seconds() except Exception: print", "\"\\n},\\n\" ) totalRecordsOut+=1; # All records on page within timeframe, open next page", "loading other pages print (\"Opening next page \" + url) for headline in", "until date found while not(On_This_Page): page+=1 try: browser.open(url) except Exception: print (\"Error cannot", ") totalRecordsOut+=1; # All records on page within timeframe, open next page if", "browser = mechanicalsoup.StatefulBrowser() On_This_Page = False logged_in = False # Loop until logged", "= val.find(text=True) writer = \" \".join(re.split(\"\\s+\", writer, flags=re.UNICODE)) writer = writer.replace(' ', '')", "open next page if totalRecordsOut%totalRecords == 0: totalRecordsOut=0 url = str('https://slashdot.org/?page=') + str(page+1)", "j in range(totalRecords): if (int(time_posted_list[j]) < timestamp): On_This_Page = True break else: outputList.append(str(", "[] writer_list = [] time_posted_list = [] response = [] # Count Variables", "'\\\"'+headline.a.get_text()+'\\\"' article_headline_list.append(title) #Get Text headline totalRecords+=1 for t in time_posted: time_posted_list.append(convertTime(t.get(\"datetime\"))) for val", "next page \") print (\"Page \" + url + \" 
url = 'https://slashdot.org/'
# List Variables
outputList = []
article_headline_list = []
writer_list = []
time_posted_list = []
# Count Variables
totalRecords = 0
totalRecordsOut = 0
page = -1
timestamp = 0
browser = mechanicalsoup.StatefulBrowser()
On_This_Page = False
logged_in = False

# Loop until logged in
browser.open(url)
while not logged_in:
    nick = input("Enter your nickname for slashdot.org: ")  # Chazzio1
    passw = getpass.getpass("Enter your password: ")  # <PASSWORD>
    while timestamp < 1:
        try:
            timestamp = int(input("Enter timestamp in seconds since 1970: "))  # 1535241600
        except Exception:
            print("Not a valid number")
    browser.select_form(nr=1)
    browser['unickname'] = nick
    browser['upasswd'] = passw
    result = browser.submit_selected()
    soup_0 = BeautifulSoup(result.content, "lxml")
    user = str(soup_0.find_all(class_="user-access"))
    if user.find(nick) > 0:
        logged_in = True
        print("Logged in")
    else:
        print("Try Again\n")
        time.sleep(5)

# Loop until date found
while not On_This_Page:
    page += 1
    try:
        result = browser.open(url)
    except Exception:
        print("Error cannot open next page")
        print("Page " + url + " may not exist")
        browser.close()  # release resources
        break
    # HTML to BeautifulSoup (use the freshly opened page, not the login response)
    soup = BeautifulSoup(result.content, "lxml")
    # Find all Headlines
    article_headline = soup.find_all('span', class_="story-title")
    poster = soup.find_all('span', class_="story-byline")
    time_posted = soup.find_all('time')
    # Store all required info
    for headline in article_headline:
        title = '"' + headline.a.get_text() + '"'
        article_headline_list.append(title)  # Get Text headline
        totalRecords += 1
    for t in time_posted:
        time_posted_list.append(convertTime(t.get("datetime")))
    for val in poster:
        writer = val.find(text=True)
        writer = " ".join(re.split(r"\s+", writer, flags=re.UNICODE))
        writer = writer.replace(' ', '')
        writer = writer.replace('Posted', '')
        writer = writer.replace('by', '')
        writer_list.append(writer)
    # Make output List as per format required
    for j in range(totalRecords):
        if int(time_posted_list[j]) < timestamp:
            On_This_Page = True
            break
        else:
            outputList.append(
                "{" + "\n" + "\"headline\": " + str(article_headline_list[j])
                + "\n\"author\": \"" + str(writer_list[j])
                + "\"\n\"date\": " + str(int(time_posted_list[j]))
                + "\n},\n"
            )
            totalRecordsOut += 1
    # All records on page within timeframe, open next page
    if totalRecordsOut % totalRecords == 0:
        totalRecordsOut = 0
        url = 'https://slashdot.org/?page=' + str(page + 1)
        # Display this message while loading other pages
        print("Opening next page " + url)

for headline in outputList:
    print(headline)
print("Total headlines returned: " + str(totalRecordsOut))
browser.close()
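# For reference (illustrative values, not from the original script): each
# entry appended to outputList renders as a JSON-like record of the form
#
#   {
#   "headline": "Some Story Title"
#   "author": "BeauHD"
#   "date": 1535357100
#   },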
# Tests for bddcli environ manipulation.
import os

from bddcli import Given, stdout, Application, when, given


def foos():  # pragma: no cover
    e = os.environ.copy()
    # For Linux and Windows
    discarded_variables = ['LC_CTYPE', 'PWD', 'COMSPEC', 'PATHEXT',
                           'PROMPT', 'SYSTEMROOT']
    # Windows environment variables are case-insensitive, lowercase them
    print(' '.join(
        f'{k}: {v}' for k, v in e.items()
        if k not in discarded_variables
    ).lower())


app = Application('foo', 'tests.test_environ:foos')


def test_environ():
    with Given(app, environ={'bar': 'baz'}):
        assert stdout == 'bar: baz\n'

        when(environ=given - 'bar')
        assert stdout == '\n'

        when(environ=given + {'qux': 'quux'})
        assert stdout == 'bar: baz qux: quux\n'

        when(environ=given | {'bar': 'quux'})
        assert stdout == 'bar: quux\n'
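# Reading of the operators above (inferred from the assertions, not from
# bddcli documentation): `given` stands for the environ the scenario started
# with, `given - 'bar'` removes a variable, `given + {...}` merges new
# variables in, and `given | {...}` overrides existing values. Running the
# file standalone would look like:
#
#   $ pytest tests/test_environ.py -q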
# Django settings overrides for Heroku deployment (VEnCode_Django project).
from .base import *  # presumably also supplies config() and django_heroku
import dj_database_url

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['*']

# If DEBUG is False, send the errors to the email:
ADMINS = [
    ('Andre', '<EMAIL>'),
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'VEnCode_Django.urls'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': config("DB_NAME"),
        'USER': config("DB_USER"),
        'PASSWORD': '',
        'HOST': config("DB_HOST"),
        'PORT': '',
    }
}

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# Configure Django App for Heroku.
django_heroku.settings(locals())

# Production set up for heroku:
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
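# Illustrative note on the Heroku override above (values made up):
# dj_database_url.config() parses the DATABASE_URL environment variable, so
# e.g. DATABASE_URL=postgres://user:secret@host:5432/dbname yields a dict with
# ENGINE/NAME/USER/PASSWORD/HOST/PORT keys that replaces the static settings,
# with CONN_MAX_AGE set from conn_max_age=500.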
# Allauth configurations, backend to send sign-in e-mail verification e-mail:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'

# REDIS related settings
CELERY_BROKER_URL = config('REDIS_URL', default="redis://")
CELERY_RESULT_BACKEND = config('REDIS_URL', default="redis://")
BROKER_URL = config('REDIS_URL', default="redis://")

# Allauth related settings
EMAIL_HOST = config("MAILGUN_SMTP_SERVER")
EMAIL_PORT = config("MAILGUN_SMTP_PORT")
EMAIL_HOST_USER = DEFAULT_FROM_EMAIL = config("MAILGUN_SMTP_LOGIN")
EMAIL_HOST_PASSWORD = config("MAILGUN_SMTP_PASSWORD")
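# Assuming `config` here is python-decouple's config() (a guess based on the
# call signature; it is not imported in this file), each lookup reads an
# environment variable and falls back to the given default, e.g.:
#
#   REDIS_URL=redis://localhost:6379/0 set   -> config('REDIS_URL', default="redis://")
#                                               returns "redis://localhost:6379/0"
#   REDIS_URL unset                          -> returns "redis://"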
# OpenPNM transient multiphysics algorithm.
import logging

import numpy as np

from openpnm.utils import SettingsAttr, Docorator
from openpnm.integrators import ScipyRK45
from openpnm.algorithms import GenericAlgorithm
from openpnm.algorithms._solution import SolutionContainer, TransientSolution

logger = logging.getLogger(__name__)
docstr = Docorator()


@docstr.dedent
class TransientMultiPhysicsSettings:
    r"""
    Parameters
    ----------
    %(GenericAlgorithmSettings.parameters)s
    algorithms: list
        List of transient algorithm objects to be solved in a coupled manner

    """
    algorithms = []


@docstr.dedent
class TransientMultiPhysics(GenericAlgorithm):
    r"""
    A subclass for transient multiphysics simulations.
    """

    def __init__(self, algorithms, settings=None, **kwargs):
        self.settings = SettingsAttr(TransientMultiPhysicsSettings, settings)
        self.settings.algorithms = [alg.name for alg in algorithms]
        self._algs = algorithms
        super().__init__(settings=self.settings, **kwargs)

    def run(self, x0, tspan, saveat=None, integrator=None):
        """
        Runs all of the transient algorithms simultaneously and returns
        the solution.

        Parameters
        ----------
        x0 : ndarray or float
            Array (or scalar) containing initial condition values.
        tspan : array_like
            Tuple (or array) containing the integration time span.
        saveat : array_like or float, optional
            If an array is passed, it signifies the time points at which
            the solution is to be stored, and if a scalar is passed, it
            refers to the interval at which the solution is to be stored.
        integrator : Integrator, optional
            Integrator object which will be used to do the time stepping.
            Can be instantiated using the ``openpnm.integrators`` module.

        Returns
        -------
        TransientSolution
            The solution object, which is basically a numpy array with the
            added functionality that it can be called to return the solution
            at intermediate times (i.e., those not stored in the solution
            object). In the case of multiphysics, the solution object is a
            combined array of solutions for each physics. The solution for
            each physics is available on each algorithm object independently.

        """
        logger.info('Running TransientMultiphysics')
        if np.isscalar(saveat):
            saveat = np.arange(*tspan, saveat)
        if (saveat is not None) and (tspan[1] not in saveat):
            saveat = np.hstack((saveat, [tspan[1]]))
        integrator = ScipyRK45() if integrator is None else integrator
        for i, alg in enumerate(self._algs):
            # Perform pre-solve validations
            alg._validate_settings()
            alg._validate_data_health()
            # Write x0 to the algorithm object (needed by _update_iterative_props)
            x0_i = self._get_x0(x0, i)
            alg['pore.ic'] = x0_i = np.ones(alg.Np, dtype=float) * x0_i
            alg._merge_inital_and_boundary_values()
        # Build RHS (dx/dt = RHS), then integrate the system of ODEs
        rhs = self._build_rhs()
        # Integrate RHS using the given solver
        soln = integrator.solve(rhs, x0, tspan, saveat)
        # Return dictionary containing the solution
        self.soln = SolutionContainer()
        for i, alg in enumerate(self._algs):
            # Slice soln and attach as a TransientSolution object to each alg
            t = soln.t
            x = soln[i*alg.Np:(i+1)*alg.Np, :]
            alg.soln = TransientSolution(t, x)
            # Add the solution of each alg to the solution dictionary
            self.soln[alg.settings['quantity']] = alg.soln
        return self.soln

    def _run_special(self, x0):
        ...
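    # Illustrative note (added here, not from the original source): _get_x0
    # below slices the stacked state vector per algorithm. With two algorithms
    # of Np = 3 pores each:
    #     tmp       = [3, 3]
    #     idx_end   = np.cumsum(tmp)               -> [3, 6]
    #     idx_start = np.hstack((0, idx_end[:-1])) -> [0, 3]
    # so algorithm 0 reads y[0:3] and algorithm 1 reads y[3:6].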
    def _build_rhs(self):
        """
        Returns a function handle, which calculates dy/dt = rhs(y, t).

        Notes
        -----
        ``y`` is a composite array that contains ALL the variables that
        the multiphysics algorithm solves for, e.g., if the constituent
        algorithms are ``TransientFickianDiffusion`` and
        ``TransientFourierConduction``, ``y[0:Np-1]`` refers to the
        concentration values and ``y[Np:2*Np-1]`` refers to the
        temperature values.

        """
        def ode_func(t, y):
            # Initialize RHS
            rhs = []
            for i, alg in enumerate(self._algs):
                # Get x from y; assumes alg.Np is the same for all algs
                x = self._get_x0(y, i)  # again use the helper function
                # Store x onto the algorithm
                alg.x = x
                # Build A and b
                alg._update_A_and_b()
                A = alg.A.tocsc()
                b = alg.b
                # Retrieve pore volume
                V = alg.network[alg.settings["pore_volume"]]
                # Calculate RHS
                rhs_alg = np.hstack(-A.dot(x) + b) / V
                rhs = np.hstack((rhs, rhs_alg))
            return rhs
        return ode_func

    def _get_x0(self, x0, i):
        tmp = [alg.Np for alg in self._algs]
        idx_end = np.cumsum(tmp)
        idx_start = np.hstack((0, idx_end[:-1]))
        x0 = x0[idx_start[i]:idx_end[i]]
        return x0
integrator : Integrator,", "_get_x0(self, x0, i): tmp = [alg.Np for alg in self._algs] idx_end = np.cumsum(tmp)", "run(self, x0, tspan, saveat=None, integrator=None): \"\"\" Runs all of the transient algorithms simultaneoulsy", "the case of multiphysics, the solution object is a combined array of solutions", "# Get x from y, assume alg.Np is same for all algs x", "alg._update_A_and_b() A = alg.A.tocsc() b = alg.b # Retrieve volume V = alg.network[alg.settings[\"pore_volume\"]]", "i) alg['pore.ic'] = x0_i = np.ones(alg.Np, dtype=float) * x0_i alg._merge_inital_and_boundary_values() # Build RHS", "return rhs return ode_func def _get_x0(self, x0, i): tmp = [alg.Np for alg", "RHS rhs = [] for i, alg in enumerate(self._algs): # Get x from", "(needed by _update_iterative_props) x0_i = self._get_x0(x0, i) alg['pore.ic'] = x0_i = np.ones(alg.Np, dtype=float)", ": ndarray or float Array (or scalar) containing initial condition values. tspan :", "solution. Parameters steal from transient reactive transport ---------- x0 : ndarray or float", "logging import numpy as np from openpnm.utils import SettingsAttr, Docorator from openpnm.integrators import", "SettingsAttr(TransientMultiPhysicsSettings, settings) self.settings.algorithms = [alg.name for alg in algorithms] self._algs = algorithms super().__init__(settings=self.settings,", "List of transient algorithm objects to be solved in a coupled manner \"\"\"", "i, alg in enumerate(self._algs): # Slice soln and attach as TransientSolution object to", "array of solutions for each physics. The solution for each physics is available", "passed, it refers to the interval at which the solution is to be", "to the interval at which the solution is to be stored. integrator :", "the variables that the multiphysics algorithm solves for, e.g., if the constituent algorithms", "intermediate times (i.e., those not stored in the solution object). In the case", "volume V = alg.network[alg.settings[\"pore_volume\"]] # Calcualte RHS rhs_alg = np.hstack(-A.dot(x) + b)/V rhs", "span. saveat : array_like or float, optional If an array is passed, it", "values. \"\"\" def ode_func(t, y): # Initialize RHS rhs = [] for i,", "= [] for i, alg in enumerate(self._algs): # Get x from y, assume", "V = alg.network[alg.settings[\"pore_volume\"]] # Calcualte RHS rhs_alg = np.hstack(-A.dot(x) + b)/V rhs =", "# Calcualte RHS rhs_alg = np.hstack(-A.dot(x) + b)/V rhs = np.hstack((rhs, rhs_alg)) return", "_build_rhs(self): \"\"\" Returns a function handle, which calculates dy/dt = rhs(y, t). Notes", "use helper function # Store x onto algorithm, alg.x = x # Build", "array_like Tuple (or array) containing the integration time span. 
saveat : array_like or", "in a coupled manner \"\"\" algorithms = [] @docstr.dedent class TransientMultiPhysics(GenericAlgorithm): r\"\"\" A", "a coupled manner \"\"\" algorithms = [] @docstr.dedent class TransientMultiPhysics(GenericAlgorithm): r\"\"\" A subclass", "for i, alg in enumerate(self._algs): # Slice soln and attach as TransientSolution object", "ode_func(t, y): # Initialize RHS rhs = [] for i, alg in enumerate(self._algs):", "solution of each alg to solution dictionary self.soln[alg.settings['quantity']] = alg.soln return self.soln def", "Calcualte RHS rhs_alg = np.hstack(-A.dot(x) + b)/V rhs = np.hstack((rhs, rhs_alg)) return rhs", "is a composite array that contains ALL the variables that the multiphysics algorithm", "for alg in algorithms] self._algs = algorithms super().__init__(settings=self.settings, **kwargs) def run(self, x0, tspan,", "functionality that it can be called to return the solution at intermediate times", "saveat) # Return dictionary containing solution self.soln = SolutionContainer() for i, alg in", "the constituent algorithms are ``TransientFickianDiffusion``, and ``TransientFourierConduction``, ``y[0:Np-1]`` refers to the concentration, and", ": array_like or float, optional If an array is passed, it signifies the", "to be stored, and if a scalar is passed, it refers to the", "is to be stored. integrator : Integrator, optional Integrator object which will be", "x) # Add solution of each alg to solution dictionary self.soln[alg.settings['quantity']] = alg.soln", "def _get_x0(self, x0, i): tmp = [alg.Np for alg in self._algs] idx_end =", "solutions for each physics. The solution for each physics is available on each", "np.ones(alg.Np, dtype=float) * x0_i alg._merge_inital_and_boundary_values() # Build RHS (dx/dt = RHS), then integrate", "alg.x = x # Build A and b alg._update_A_and_b() A = alg.A.tocsc() b", "def _run_special(self, x0): ... def _build_rhs(self): \"\"\" Returns a function handle, which calculates", "from openpnm.algorithms import GenericAlgorithm from openpnm.algorithms._solution import SolutionContainer, TransientSolution logger = logging.getLogger(__name__) docstr", "(i.e., those not stored in the solution object). In the case of multiphysics,", "dictionary containing solution self.soln = SolutionContainer() for i, alg in enumerate(self._algs): # Slice", "saveat = np.hstack((saveat, [tspan[1]])) integrator = ScipyRK45() if integrator is None else integrator", "Parameters ---------- %(GenericAlgorithmSettings.parameters)s algorithms: list List of transient algorithm objects to be solved", "by _update_iterative_props) x0_i = self._get_x0(x0, i) alg['pore.ic'] = x0_i = np.ones(alg.Np, dtype=float) *", "RHS), then integrate the system of ODEs rhs = self._build_rhs() # Integrate RHS", "(dx/dt = RHS), then integrate the system of ODEs rhs = self._build_rhs() #", "not None) and (tspan[1] not in saveat): saveat = np.hstack((saveat, [tspan[1]])) integrator =", "y, assume alg.Np is same for all algs x = self._get_x0(y, i) #", "if integrator is None else integrator for i, alg in enumerate(self._algs): # Perform", "for i, alg in enumerate(self._algs): # Get x from y, assume alg.Np is", "# Store x onto algorithm, alg.x = x # Build A and b", "= x # Build A and b alg._update_A_and_b() A = alg.A.tocsc() b =", "(or array) containing the integration time span. saveat : array_like or float, optional", "* x0_i alg._merge_inital_and_boundary_values() # Build RHS (dx/dt = RHS), then integrate the system", "times (i.e., those not stored in the solution object). 
In the case of", "(tspan[1] not in saveat): saveat = np.hstack((saveat, [tspan[1]])) integrator = ScipyRK45() if integrator", "or float, optional If an array is passed, it signifies the time points", "be used to to the time stepping. Can be instantiated using openpnm.integrators module.", "stepping. Can be instantiated using openpnm.integrators module. Returns ------- TransientSolution The solution object,", "= RHS), then integrate the system of ODEs rhs = self._build_rhs() # Integrate", "import ScipyRK45 from openpnm.algorithms import GenericAlgorithm from openpnm.algorithms._solution import SolutionContainer, TransientSolution logger =", "algorithms] self._algs = algorithms super().__init__(settings=self.settings, **kwargs) def run(self, x0, tspan, saveat=None, integrator=None): \"\"\"", "to the temperature values. \"\"\" def ode_func(t, y): # Initialize RHS rhs =", "return ode_func def _get_x0(self, x0, i): tmp = [alg.Np for alg in self._algs]", "integrator=None): \"\"\" Runs all of the transient algorithms simultaneoulsy and returns the solution.", "at intermediate times (i.e., those not stored in the solution object). In the", "object is a combined array of solutions for each physics. The solution for", "Perform pre-solve validations alg._validate_settings() alg._validate_data_health() # Write x0 to algorithm the obj (needed", "i, alg in enumerate(self._algs): # Get x from y, assume alg.Np is same", "alg t = soln.t x = soln[i*alg.Np:(i+1)*alg.Np, :] alg.soln = TransientSolution(t, x) #", "algorithms, settings=None, **kwargs): self.settings = SettingsAttr(TransientMultiPhysicsSettings, settings) self.settings.algorithms = [alg.name for alg in", "time stepping. Can be instantiated using openpnm.integrators module. Returns ------- TransientSolution The solution", "Write x0 to algorithm the obj (needed by _update_iterative_props) x0_i = self._get_x0(x0, i)", "it refers to the interval at which the solution is to be stored.", "TransientSolution(t, x) # Add solution of each alg to solution dictionary self.soln[alg.settings['quantity']] =", "i): tmp = [alg.Np for alg in self._algs] idx_end = np.cumsum(tmp) idx_start =", "attach as TransientSolution object to each alg t = soln.t x = soln[i*alg.Np:(i+1)*alg.Np,", "= alg.soln return self.soln def _run_special(self, x0): ... def _build_rhs(self): \"\"\" Returns a", "solution object, which is basically a numpy array with the added functionality that", "validations alg._validate_settings() alg._validate_data_health() # Write x0 to algorithm the obj (needed by _update_iterative_props)", "in enumerate(self._algs): # Get x from y, assume alg.Np is same for all", "TransientSolution object to each alg t = soln.t x = soln[i*alg.Np:(i+1)*alg.Np, :] alg.soln", "object). In the case of multiphysics, the solution object is a combined array", "``y[0:Np-1]`` refers to the concentration, and ``[Np:2*Np-1]`` refers to the temperature values. \"\"\"", "+ b)/V rhs = np.hstack((rhs, rhs_alg)) return rhs return ode_func def _get_x0(self, x0,", "= np.hstack(-A.dot(x) + b)/V rhs = np.hstack((rhs, rhs_alg)) return rhs return ode_func def", "solution for each physics is available on each algorithm object independently. \"\"\" logger.info('Running", "used to to the time stepping. Can be instantiated using openpnm.integrators module. 
Returns", "algorithm the obj (needed by _update_iterative_props) x0_i = self._get_x0(x0, i) alg['pore.ic'] = x0_i", "docstr = Docorator() @docstr.dedent class TransientMultiPhysicsSettings: r\"\"\" Parameters ---------- %(GenericAlgorithmSettings.parameters)s algorithms: list List", "solved in a coupled manner \"\"\" algorithms = [] @docstr.dedent class TransientMultiPhysics(GenericAlgorithm): r\"\"\"", "Build RHS (dx/dt = RHS), then integrate the system of ODEs rhs =", "of each alg to solution dictionary self.soln[alg.settings['quantity']] = alg.soln return self.soln def _run_special(self,", "# Perform pre-solve validations alg._validate_settings() alg._validate_data_health() # Write x0 to algorithm the obj", "the solution is to be stored. integrator : Integrator, optional Integrator object which", "integrator = ScipyRK45() if integrator is None else integrator for i, alg in", "= alg.network[alg.settings[\"pore_volume\"]] # Calcualte RHS rhs_alg = np.hstack(-A.dot(x) + b)/V rhs = np.hstack((rhs,", "to algorithm the obj (needed by _update_iterative_props) x0_i = self._get_x0(x0, i) alg['pore.ic'] =", "self._algs] idx_end = np.cumsum(tmp) idx_start = np.hstack((0, idx_end[:-1])) x0 = x0[idx_start[i]:idx_end[i]] return x0", "integrator is None else integrator for i, alg in enumerate(self._algs): # Perform pre-solve", "transient algorithms simultaneoulsy and returns the solution. Parameters steal from transient reactive transport", "Runs all of the transient algorithms simultaneoulsy and returns the solution. Parameters steal", "module. Returns ------- TransientSolution The solution object, which is basically a numpy array", "time span. saveat : array_like or float, optional If an array is passed,", "refers to the interval at which the solution is to be stored. integrator", "at which the solution is to be stored. integrator : Integrator, optional Integrator", "SolutionContainer, TransientSolution logger = logging.getLogger(__name__) docstr = Docorator() @docstr.dedent class TransientMultiPhysicsSettings: r\"\"\" Parameters", "= x0_i = np.ones(alg.Np, dtype=float) * x0_i alg._merge_inital_and_boundary_values() # Build RHS (dx/dt =", "solution is to be stored, and if a scalar is passed, it refers", "import SolutionContainer, TransientSolution logger = logging.getLogger(__name__) docstr = Docorator() @docstr.dedent class TransientMultiPhysicsSettings: r\"\"\"", "alg.soln = TransientSolution(t, x) # Add solution of each alg to solution dictionary", "enumerate(self._algs): # Slice soln and attach as TransientSolution object to each alg t", "object to each alg t = soln.t x = soln[i*alg.Np:(i+1)*alg.Np, :] alg.soln =", "soln.t x = soln[i*alg.Np:(i+1)*alg.Np, :] alg.soln = TransientSolution(t, x) # Add solution of", "self.soln[alg.settings['quantity']] = alg.soln return self.soln def _run_special(self, x0): ... def _build_rhs(self): \"\"\" Returns", "i) # again use helper function # Store x onto algorithm, alg.x =", "Docorator from openpnm.integrators import ScipyRK45 from openpnm.algorithms import GenericAlgorithm from openpnm.algorithms._solution import SolutionContainer,", "of the transient algorithms simultaneoulsy and returns the solution. 
Parameters steal from transient", "openpnm.algorithms import GenericAlgorithm from openpnm.algorithms._solution import SolutionContainer, TransientSolution logger = logging.getLogger(__name__) docstr =", "solves for, e.g., if the constituent algorithms are ``TransientFickianDiffusion``, and ``TransientFourierConduction``, ``y[0:Np-1]`` refers", "time points at which the solution is to be stored, and if a", "physics. The solution for each physics is available on each algorithm object independently.", "is same for all algs x = self._get_x0(y, i) # again use helper", "Return dictionary containing solution self.soln = SolutionContainer() for i, alg in enumerate(self._algs): #", "alg._validate_data_health() # Write x0 to algorithm the obj (needed by _update_iterative_props) x0_i =", "which is basically a numpy array with the added functionality that it can", "x0, tspan, saveat=None, integrator=None): \"\"\" Runs all of the transient algorithms simultaneoulsy and", "values. tspan : array_like Tuple (or array) containing the integration time span. saveat", "\"\"\" logger.info('Running TransientMultiphysics') if np.isscalar(saveat): saveat = np.arange(*tspan, saveat) if (saveat is not", "which the solution is to be stored. integrator : Integrator, optional Integrator object", "in enumerate(self._algs): # Slice soln and attach as TransientSolution object to each alg", "system of ODEs rhs = self._build_rhs() # Integrate RHS using the given solver", ":] alg.soln = TransientSolution(t, x) # Add solution of each alg to solution", "self.soln = SolutionContainer() for i, alg in enumerate(self._algs): # Slice soln and attach", "composite array that contains ALL the variables that the multiphysics algorithm solves for,", "(or scalar) containing initial condition values. tspan : array_like Tuple (or array) containing", "can be called to return the solution at intermediate times (i.e., those not", "... def _build_rhs(self): \"\"\" Returns a function handle, which calculates dy/dt = rhs(y,", "solution object is a combined array of solutions for each physics. The solution", "= soln.t x = soln[i*alg.Np:(i+1)*alg.Np, :] alg.soln = TransientSolution(t, x) # Add solution", "will be used to to the time stepping. Can be instantiated using openpnm.integrators", "steal from transient reactive transport ---------- x0 : ndarray or float Array (or", "for alg in self._algs] idx_end = np.cumsum(tmp) idx_start = np.hstack((0, idx_end[:-1])) x0 =", "the solution object is a combined array of solutions for each physics. The", "t). Notes ----- ``y`` is a composite array that contains ALL the variables", "soln = integrator.solve(rhs, x0, tspan, saveat) # Return dictionary containing solution self.soln =", "transient reactive transport ---------- x0 : ndarray or float Array (or scalar) containing", "def run(self, x0, tspan, saveat=None, integrator=None): \"\"\" Runs all of the transient algorithms", "algorithm objects to be solved in a coupled manner \"\"\" algorithms = []", "containing initial condition values. tspan : array_like Tuple (or array) containing the integration", "ndarray or float Array (or scalar) containing initial condition values. 
tspan : array_like", "= logging.getLogger(__name__) docstr = Docorator() @docstr.dedent class TransientMultiPhysicsSettings: r\"\"\" Parameters ---------- %(GenericAlgorithmSettings.parameters)s algorithms:", "logger = logging.getLogger(__name__) docstr = Docorator() @docstr.dedent class TransientMultiPhysicsSettings: r\"\"\" Parameters ---------- %(GenericAlgorithmSettings.parameters)s", "algs x = self._get_x0(y, i) # again use helper function # Store x", "Integrator object which will be used to to the time stepping. Can be", "in the solution object). In the case of multiphysics, the solution object is", "= np.ones(alg.Np, dtype=float) * x0_i alg._merge_inital_and_boundary_values() # Build RHS (dx/dt = RHS), then", "rhs_alg)) return rhs return ode_func def _get_x0(self, x0, i): tmp = [alg.Np for", "alg._validate_settings() alg._validate_data_health() # Write x0 to algorithm the obj (needed by _update_iterative_props) x0_i", "soln and attach as TransientSolution object to each alg t = soln.t x", "= self._get_x0(y, i) # again use helper function # Store x onto algorithm,", "is passed, it signifies the time points at which the solution is to", "algorithms super().__init__(settings=self.settings, **kwargs) def run(self, x0, tspan, saveat=None, integrator=None): \"\"\" Runs all of", "self.settings.algorithms = [alg.name for alg in algorithms] self._algs = algorithms super().__init__(settings=self.settings, **kwargs) def", "self._get_x0(x0, i) alg['pore.ic'] = x0_i = np.ones(alg.Np, dtype=float) * x0_i alg._merge_inital_and_boundary_values() # Build", "if a scalar is passed, it refers to the interval at which the", "return the solution at intermediate times (i.e., those not stored in the solution", "algorithms = [] @docstr.dedent class TransientMultiPhysics(GenericAlgorithm): r\"\"\" A subclass for transient multiphysics simulations.", "saveat=None, integrator=None): \"\"\" Runs all of the transient algorithms simultaneoulsy and returns the", "stored. integrator : Integrator, optional Integrator object which will be used to to", "super().__init__(settings=self.settings, **kwargs) def run(self, x0, tspan, saveat=None, integrator=None): \"\"\" Runs all of the", "Initialize RHS rhs = [] for i, alg in enumerate(self._algs): # Get x", "all algs x = self._get_x0(y, i) # again use helper function # Store", "in enumerate(self._algs): # Perform pre-solve validations alg._validate_settings() alg._validate_data_health() # Write x0 to algorithm", "reactive transport ---------- x0 : ndarray or float Array (or scalar) containing initial", "passed, it signifies the time points at which the solution is to be", "optional If an array is passed, it signifies the time points at which", "object independently. 
\"\"\" logger.info('Running TransientMultiphysics') if np.isscalar(saveat): saveat = np.arange(*tspan, saveat) if (saveat", "alg['pore.ic'] = x0_i = np.ones(alg.Np, dtype=float) * x0_i alg._merge_inital_and_boundary_values() # Build RHS (dx/dt", "as np from openpnm.utils import SettingsAttr, Docorator from openpnm.integrators import ScipyRK45 from openpnm.algorithms", "@docstr.dedent class TransientMultiPhysicsSettings: r\"\"\" Parameters ---------- %(GenericAlgorithmSettings.parameters)s algorithms: list List of transient algorithm", "algorithms: list List of transient algorithm objects to be solved in a coupled", "# Write x0 to algorithm the obj (needed by _update_iterative_props) x0_i = self._get_x0(x0,", "a scalar is passed, it refers to the interval at which the solution", "x # Build A and b alg._update_A_and_b() A = alg.A.tocsc() b = alg.b", "---------- %(GenericAlgorithmSettings.parameters)s algorithms: list List of transient algorithm objects to be solved in", "TransientMultiPhysics(GenericAlgorithm): r\"\"\" A subclass for transient multiphysics simulations. \"\"\" def __init__(self, algorithms, settings=None,", "and if a scalar is passed, it refers to the interval at which", "alg.soln return self.soln def _run_special(self, x0): ... def _build_rhs(self): \"\"\" Returns a function", "import GenericAlgorithm from openpnm.algorithms._solution import SolutionContainer, TransientSolution logger = logging.getLogger(__name__) docstr = Docorator()", "alg.A.tocsc() b = alg.b # Retrieve volume V = alg.network[alg.settings[\"pore_volume\"]] # Calcualte RHS", "list List of transient algorithm objects to be solved in a coupled manner", "a composite array that contains ALL the variables that the multiphysics algorithm solves", "x from y, assume alg.Np is same for all algs x = self._get_x0(y,", "for each physics. The solution for each physics is available on each algorithm", "enumerate(self._algs): # Perform pre-solve validations alg._validate_settings() alg._validate_data_health() # Write x0 to algorithm the", "x0, tspan, saveat) # Return dictionary containing solution self.soln = SolutionContainer() for i,", "The solution object, which is basically a numpy array with the added functionality", "each physics. The solution for each physics is available on each algorithm object", "import logging import numpy as np from openpnm.utils import SettingsAttr, Docorator from openpnm.integrators", "be stored. integrator : Integrator, optional Integrator object which will be used to", "basically a numpy array with the added functionality that it can be called", "given solver soln = integrator.solve(rhs, x0, tspan, saveat) # Return dictionary containing solution", "x0_i alg._merge_inital_and_boundary_values() # Build RHS (dx/dt = RHS), then integrate the system of", "not stored in the solution object). In the case of multiphysics, the solution", "e.g., if the constituent algorithms are ``TransientFickianDiffusion``, and ``TransientFourierConduction``, ``y[0:Np-1]`` refers to the", "= alg.A.tocsc() b = alg.b # Retrieve volume V = alg.network[alg.settings[\"pore_volume\"]] # Calcualte", "that it can be called to return the solution at intermediate times (i.e.,", "containing the integration time span. saveat : array_like or float, optional If an", "or float Array (or scalar) containing initial condition values. tspan : array_like Tuple", "np.arange(*tspan, saveat) if (saveat is not None) and (tspan[1] not in saveat): saveat", "for each physics is available on each algorithm object independently. 
\"\"\" logger.info('Running TransientMultiphysics')", "is to be stored, and if a scalar is passed, it refers to", "\"\"\" Runs all of the transient algorithms simultaneoulsy and returns the solution. Parameters", "import SettingsAttr, Docorator from openpnm.integrators import ScipyRK45 from openpnm.algorithms import GenericAlgorithm from openpnm.algorithms._solution", "transport ---------- x0 : ndarray or float Array (or scalar) containing initial condition", "is None else integrator for i, alg in enumerate(self._algs): # Perform pre-solve validations", "integration time span. saveat : array_like or float, optional If an array is", "y): # Initialize RHS rhs = [] for i, alg in enumerate(self._algs): #", "initial condition values. tspan : array_like Tuple (or array) containing the integration time", "x0, i): tmp = [alg.Np for alg in self._algs] idx_end = np.cumsum(tmp) idx_start", "as TransientSolution object to each alg t = soln.t x = soln[i*alg.Np:(i+1)*alg.Np, :]", "object, which is basically a numpy array with the added functionality that it", "those not stored in the solution object). In the case of multiphysics, the", "multiphysics, the solution object is a combined array of solutions for each physics.", "alg in algorithms] self._algs = algorithms super().__init__(settings=self.settings, **kwargs) def run(self, x0, tspan, saveat=None,", "Add solution of each alg to solution dictionary self.soln[alg.settings['quantity']] = alg.soln return self.soln", "logging.getLogger(__name__) docstr = Docorator() @docstr.dedent class TransientMultiPhysicsSettings: r\"\"\" Parameters ---------- %(GenericAlgorithmSettings.parameters)s algorithms: list", "again use helper function # Store x onto algorithm, alg.x = x #", "and ``[Np:2*Np-1]`` refers to the temperature values. \"\"\" def ode_func(t, y): # Initialize", "Build A and b alg._update_A_and_b() A = alg.A.tocsc() b = alg.b # Retrieve", "RHS rhs_alg = np.hstack(-A.dot(x) + b)/V rhs = np.hstack((rhs, rhs_alg)) return rhs return", "Array (or scalar) containing initial condition values. tspan : array_like Tuple (or array)", "it can be called to return the solution at intermediate times (i.e., those", "rhs_alg = np.hstack(-A.dot(x) + b)/V rhs = np.hstack((rhs, rhs_alg)) return rhs return ode_func", "integrator.solve(rhs, x0, tspan, saveat) # Return dictionary containing solution self.soln = SolutionContainer() for", "each physics is available on each algorithm object independently. \"\"\" logger.info('Running TransientMultiphysics') if", "**kwargs): self.settings = SettingsAttr(TransientMultiPhysicsSettings, settings) self.settings.algorithms = [alg.name for alg in algorithms] self._algs", "None else integrator for i, alg in enumerate(self._algs): # Perform pre-solve validations alg._validate_settings()", "each alg to solution dictionary self.soln[alg.settings['quantity']] = alg.soln return self.soln def _run_special(self, x0):", "refers to the temperature values. \"\"\" def ode_func(t, y): # Initialize RHS rhs", "= rhs(y, t). Notes ----- ``y`` is a composite array that contains ALL", "b = alg.b # Retrieve volume V = alg.network[alg.settings[\"pore_volume\"]] # Calcualte RHS rhs_alg", "solver soln = integrator.solve(rhs, x0, tspan, saveat) # Return dictionary containing solution self.soln", "solution object). 
In the case of multiphysics, the solution object is a combined", "alg.Np is same for all algs x = self._get_x0(y, i) # again use", "transient algorithm objects to be solved in a coupled manner \"\"\" algorithms =", "class TransientMultiPhysics(GenericAlgorithm): r\"\"\" A subclass for transient multiphysics simulations. \"\"\" def __init__(self, algorithms,", "alg._merge_inital_and_boundary_values() # Build RHS (dx/dt = RHS), then integrate the system of ODEs", "for, e.g., if the constituent algorithms are ``TransientFickianDiffusion``, and ``TransientFourierConduction``, ``y[0:Np-1]`` refers to", "temperature values. \"\"\" def ode_func(t, y): # Initialize RHS rhs = [] for", "= soln[i*alg.Np:(i+1)*alg.Np, :] alg.soln = TransientSolution(t, x) # Add solution of each alg", "be stored, and if a scalar is passed, it refers to the interval", "= integrator.solve(rhs, x0, tspan, saveat) # Return dictionary containing solution self.soln = SolutionContainer()", "to solution dictionary self.soln[alg.settings['quantity']] = alg.soln return self.soln def _run_special(self, x0): ... def", "array_like or float, optional If an array is passed, it signifies the time", "of ODEs rhs = self._build_rhs() # Integrate RHS using the given solver soln", "to each alg t = soln.t x = soln[i*alg.Np:(i+1)*alg.Np, :] alg.soln = TransientSolution(t,", "openpnm.integrators import ScipyRK45 from openpnm.algorithms import GenericAlgorithm from openpnm.algorithms._solution import SolutionContainer, TransientSolution logger", "the time stepping. Can be instantiated using openpnm.integrators module. Returns ------- TransientSolution The", "array) containing the integration time span. saveat : array_like or float, optional If", "= alg.b # Retrieve volume V = alg.network[alg.settings[\"pore_volume\"]] # Calcualte RHS rhs_alg =", "= ScipyRK45() if integrator is None else integrator for i, alg in enumerate(self._algs):", "the multiphysics algorithm solves for, e.g., if the constituent algorithms are ``TransientFickianDiffusion``, and", "saveat) if (saveat is not None) and (tspan[1] not in saveat): saveat =", "dictionary self.soln[alg.settings['quantity']] = alg.soln return self.soln def _run_special(self, x0): ... def _build_rhs(self): \"\"\"", "which calculates dy/dt = rhs(y, t). Notes ----- ``y`` is a composite array", "soln[i*alg.Np:(i+1)*alg.Np, :] alg.soln = TransientSolution(t, x) # Add solution of each alg to", "**kwargs) def run(self, x0, tspan, saveat=None, integrator=None): \"\"\" Runs all of the transient", "= SolutionContainer() for i, alg in enumerate(self._algs): # Slice soln and attach as", "dy/dt = rhs(y, t). Notes ----- ``y`` is a composite array that contains", "which the solution is to be stored, and if a scalar is passed,", "np from openpnm.utils import SettingsAttr, Docorator from openpnm.integrators import ScipyRK45 from openpnm.algorithms import", "\"\"\" Returns a function handle, which calculates dy/dt = rhs(y, t). Notes -----", "ode_func def _get_x0(self, x0, i): tmp = [alg.Np for alg in self._algs] idx_end", "np.hstack((saveat, [tspan[1]])) integrator = ScipyRK45() if integrator is None else integrator for i,", "openpnm.integrators module. 
Returns ------- TransientSolution The solution object, which is basically a numpy", "be solved in a coupled manner \"\"\" algorithms = [] @docstr.dedent class TransientMultiPhysics(GenericAlgorithm):", "if the constituent algorithms are ``TransientFickianDiffusion``, and ``TransientFourierConduction``, ``y[0:Np-1]`` refers to the concentration,", "# Add solution of each alg to solution dictionary self.soln[alg.settings['quantity']] = alg.soln return", "``y`` is a composite array that contains ALL the variables that the multiphysics", "saveat = np.arange(*tspan, saveat) if (saveat is not None) and (tspan[1] not in", "x0 : ndarray or float Array (or scalar) containing initial condition values. tspan", "is not None) and (tspan[1] not in saveat): saveat = np.hstack((saveat, [tspan[1]])) integrator", "SettingsAttr, Docorator from openpnm.integrators import ScipyRK45 from openpnm.algorithms import GenericAlgorithm from openpnm.algorithms._solution import", "Returns ------- TransientSolution The solution object, which is basically a numpy array with", "def ode_func(t, y): # Initialize RHS rhs = [] for i, alg in", "algorithm object independently. \"\"\" logger.info('Running TransientMultiphysics') if np.isscalar(saveat): saveat = np.arange(*tspan, saveat) if", "= self._get_x0(x0, i) alg['pore.ic'] = x0_i = np.ones(alg.Np, dtype=float) * x0_i alg._merge_inital_and_boundary_values() #", "float, optional If an array is passed, it signifies the time points at", "self._get_x0(y, i) # again use helper function # Store x onto algorithm, alg.x", "saveat : array_like or float, optional If an array is passed, it signifies", "\"\"\" algorithms = [] @docstr.dedent class TransientMultiPhysics(GenericAlgorithm): r\"\"\" A subclass for transient multiphysics", "calculates dy/dt = rhs(y, t). Notes ----- ``y`` is a composite array that", "saveat): saveat = np.hstack((saveat, [tspan[1]])) integrator = ScipyRK45() if integrator is None else", "np.hstack((rhs, rhs_alg)) return rhs return ode_func def _get_x0(self, x0, i): tmp = [alg.Np", "else integrator for i, alg in enumerate(self._algs): # Perform pre-solve validations alg._validate_settings() alg._validate_data_health()", "is basically a numpy array with the added functionality that it can be", "# Build RHS (dx/dt = RHS), then integrate the system of ODEs rhs", "enumerate(self._algs): # Get x from y, assume alg.Np is same for all algs", "Can be instantiated using openpnm.integrators module. Returns ------- TransientSolution The solution object, which", "integrate the system of ODEs rhs = self._build_rhs() # Integrate RHS using the", "that contains ALL the variables that the multiphysics algorithm solves for, e.g., if", "physics is available on each algorithm object independently. \"\"\" logger.info('Running TransientMultiphysics') if np.isscalar(saveat):", "handle, which calculates dy/dt = rhs(y, t). Notes ----- ``y`` is a composite", "# Retrieve volume V = alg.network[alg.settings[\"pore_volume\"]] # Calcualte RHS rhs_alg = np.hstack(-A.dot(x) +", "Tuple (or array) containing the integration time span. saveat : array_like or float,", "Docorator() @docstr.dedent class TransientMultiPhysicsSettings: r\"\"\" Parameters ---------- %(GenericAlgorithmSettings.parameters)s algorithms: list List of transient", "same for all algs x = self._get_x0(y, i) # again use helper function", "the concentration, and ``[Np:2*Np-1]`` refers to the temperature values. 
\"\"\" def ode_func(t, y):", "a numpy array with the added functionality that it can be called to", "alg.b # Retrieve volume V = alg.network[alg.settings[\"pore_volume\"]] # Calcualte RHS rhs_alg = np.hstack(-A.dot(x)", "__init__(self, algorithms, settings=None, **kwargs): self.settings = SettingsAttr(TransientMultiPhysicsSettings, settings) self.settings.algorithms = [alg.name for alg", "rhs(y, t). Notes ----- ``y`` is a composite array that contains ALL the", "and b alg._update_A_and_b() A = alg.A.tocsc() b = alg.b # Retrieve volume V", "not in saveat): saveat = np.hstack((saveat, [tspan[1]])) integrator = ScipyRK45() if integrator is", "with the added functionality that it can be called to return the solution", "alg to solution dictionary self.soln[alg.settings['quantity']] = alg.soln return self.soln def _run_special(self, x0): ...", "in saveat): saveat = np.hstack((saveat, [tspan[1]])) integrator = ScipyRK45() if integrator is None", "be instantiated using openpnm.integrators module. Returns ------- TransientSolution The solution object, which is", "independently. \"\"\" logger.info('Running TransientMultiphysics') if np.isscalar(saveat): saveat = np.arange(*tspan, saveat) if (saveat is", "tmp = [alg.Np for alg in self._algs] idx_end = np.cumsum(tmp) idx_start = np.hstack((0,", "algorithms simultaneoulsy and returns the solution. Parameters steal from transient reactive transport ----------", "= np.hstack((rhs, rhs_alg)) return rhs return ode_func def _get_x0(self, x0, i): tmp =", "alg in enumerate(self._algs): # Slice soln and attach as TransientSolution object to each", "= SettingsAttr(TransientMultiPhysicsSettings, settings) self.settings.algorithms = [alg.name for alg in algorithms] self._algs = algorithms", "Integrator, optional Integrator object which will be used to to the time stepping.", "A and b alg._update_A_and_b() A = alg.A.tocsc() b = alg.b # Retrieve volume", "scalar is passed, it refers to the interval at which the solution is", "tspan, saveat=None, integrator=None): \"\"\" Runs all of the transient algorithms simultaneoulsy and returns", "= [alg.Np for alg in self._algs] idx_end = np.cumsum(tmp) idx_start = np.hstack((0, idx_end[:-1]))", "= Docorator() @docstr.dedent class TransientMultiPhysicsSettings: r\"\"\" Parameters ---------- %(GenericAlgorithmSettings.parameters)s algorithms: list List of", "x0): ... def _build_rhs(self): \"\"\" Returns a function handle, which calculates dy/dt =", "from openpnm.algorithms._solution import SolutionContainer, TransientSolution logger = logging.getLogger(__name__) docstr = Docorator() @docstr.dedent class", "that the multiphysics algorithm solves for, e.g., if the constituent algorithms are ``TransientFickianDiffusion``,", "# Slice soln and attach as TransientSolution object to each alg t =", ": array_like Tuple (or array) containing the integration time span. saveat : array_like", "import numpy as np from openpnm.utils import SettingsAttr, Docorator from openpnm.integrators import ScipyRK45", "GenericAlgorithm from openpnm.algorithms._solution import SolutionContainer, TransientSolution logger = logging.getLogger(__name__) docstr = Docorator() @docstr.dedent", "instantiated using openpnm.integrators module. 
Returns ------- TransientSolution The solution object, which is basically", "x = soln[i*alg.Np:(i+1)*alg.Np, :] alg.soln = TransientSolution(t, x) # Add solution of each", "case of multiphysics, the solution object is a combined array of solutions for", "multiphysics algorithm solves for, e.g., if the constituent algorithms are ``TransientFickianDiffusion``, and ``TransientFourierConduction``,", "from transient reactive transport ---------- x0 : ndarray or float Array (or scalar)", "array with the added functionality that it can be called to return the" ]
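# ---------------------------------------------------------------------
# Illustrative sketch (not part of the module above): a minimal,
# self-contained demonstration of the pattern used by ``_build_rhs`` and
# ``_get_x0`` -- stacking the RHS vectors of several physics into one
# composite ODE and integrating them together.  It uses plain
# numpy/scipy rather than OpenPNM's Integrator API, and the matrices
# A1/A2, sources b1/b2, and volumes V are hypothetical stand-ins, not
# values from any real network.
# ---------------------------------------------------------------------
import numpy as np
from scipy.integrate import solve_ivp

Np = 3                   # pores per physics (equal here, as _get_x0 allows)
A1 = np.eye(Np)          # stand-in coefficient matrix, physics 1
A2 = 2.0 * np.eye(Np)    # stand-in coefficient matrix, physics 2
b1 = np.zeros(Np)        # stand-in source terms
b2 = np.zeros(Np)
V = np.ones(Np)          # stand-in pore volumes


def composite_rhs(t, y):
    # Slice the composite state the same way _get_x0 does
    x1, x2 = y[:Np], y[Np:2*Np]
    rhs1 = (-A1.dot(x1) + b1) / V   # same form as rhs_alg in _build_rhs
    rhs2 = (-A2.dot(x2) + b2) / V
    return np.hstack((rhs1, rhs2))


sol = solve_ivp(composite_rhs, (0.0, 1.0), np.ones(2*Np), method="RK45")
# sol.y[:Np] is the first physics' solution and sol.y[Np:] the second's,
# mirroring how TransientMultiPhysics.run slices ``soln`` per algorithm.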
# Formatted string output (Formatierte Stringausgabe)

dic = {'Käse': 5, 'Brot': 3, 'Wein': 2, 'Eier': 6,
       'Nuss': 12, 'Tee': 14, 'Müsli': 1}

print(('Inventar'.center(16, '#')).center(60))
for namen, anzahl in dic.items():
    print((namen.ljust(13, '.') + str(anzahl).rjust(3, '.')).center(60))
print(('#'.center(16, '#')).center(60))
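# Sketch of an equivalent, more idiomatic layout using f-string format
# specs (assuming Python 3.6+): ``:.<13`` left-justifies to width 13
# padding with dots and ``:.>3`` right-justifies likewise.  It reuses
# ``dic`` from the snippet above and prints the same table body.
for namen, anzahl in dic.items():
    print(f"{namen:.<13}{anzahl:.>3}".center(60))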
[ "Path def create_config(): \"\"\" Creates a merged dictionary between the `config.json` and the", "generic access token was made specific so that the request headers are easily", "env = sys.argv[1] else: env = \"dev\" if env not in {\"dev\", \"prod\",", "+ dict2[k] ) yield (k, dict1[\"headers\"][\"authorization\"]) else: yield (k, dict2[k]) config = create_config()", "dict(_merge_configs(dict1[k], dict2[k]))) else: yield (k, dict2[k]) elif k in dict1: yield (k, dict1[k])", "else: raise FileExistsError(\"The env config file does not exist\") return dict(_merge_configs(default_config, env_config)) def", "dict1[k]) else: if k == \"generic_access_token\": dict1[\"headers\"][\"authorization\"] = ( dict1[\"headers\"][\"authorization\"] + dict2[k] )", "`config.json` and the respective environment's config, by merging them. Sidenote: the implementation for", "k in dict2: if isinstance(dict1[k], dict) and isinstance(dict2[k], dict): yield (k, dict(_merge_configs(dict1[k], dict2[k])))", "json.load(config_file) if len(sys.argv) >= 1: env = sys.argv[1] else: env = \"dev\" if", "in dict2: if isinstance(dict1[k], dict) and isinstance(dict2[k], dict): yield (k, dict(_merge_configs(dict1[k], dict2[k]))) else:", "else: yield (k, dict2[k]) elif k in dict1: yield (k, dict1[k]) else: if", "= json.load(config_file) if len(sys.argv) >= 1: env = sys.argv[1] else: env = \"dev\"", "dict2[k]) elif k in dict1: yield (k, dict1[k]) else: if k == \"generic_access_token\":", "and the respective environment's config, by merging them. Sidenote: the implementation for the", "\"\"\" Creates a merged dictionary between the `config.json` and the respective environment's config,", "in {\"dev\", \"prod\", \"staging\", \"test\"}: raise ValueError(\"Invalid env name\") env_config_path = Path(\"./config.{}.json\".format(env)) if", "= json.load(env_config_file) else: raise FileExistsError(\"The env config file does not exist\") return dict(_merge_configs(default_config,", "import json import sys from pathlib import Path def create_config(): \"\"\" Creates a", "dict2: if isinstance(dict1[k], dict) and isinstance(dict2[k], dict): yield (k, dict(_merge_configs(dict1[k], dict2[k]))) else: yield", "def create_config(): \"\"\" Creates a merged dictionary between the `config.json` and the respective", "config file does not exist\") return dict(_merge_configs(default_config, env_config)) def _merge_configs(dict1, dict2): for k", "\"test\"}: raise ValueError(\"Invalid env name\") env_config_path = Path(\"./config.{}.json\".format(env)) if env_config_path.exists(): with open(env_config_path) as", "dict) and isinstance(dict2[k], dict): yield (k, dict(_merge_configs(dict1[k], dict2[k]))) else: yield (k, dict2[k]) elif", "= sys.argv[1] else: env = \"dev\" if env not in {\"dev\", \"prod\", \"staging\",", "\"generic_access_token\": dict1[\"headers\"][\"authorization\"] = ( dict1[\"headers\"][\"authorization\"] + dict2[k] ) yield (k, dict1[\"headers\"][\"authorization\"]) else: yield", "respective environment's config, by merging them. 
Sidenote: the implementation for the generic access", "k in dict1 and k in dict2: if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):", "does not exist\") return dict(_merge_configs(default_config, env_config)) def _merge_configs(dict1, dict2): for k in set(dict1.keys()).union(dict2.keys()):", "default_config = json.load(config_file) if len(sys.argv) >= 1: env = sys.argv[1] else: env =", "in dict1 and k in dict2: if isinstance(dict1[k], dict) and isinstance(dict2[k], dict): yield", "{\"dev\", \"prod\", \"staging\", \"test\"}: raise ValueError(\"Invalid env name\") env_config_path = Path(\"./config.{}.json\".format(env)) if env_config_path.exists():", "as env_config_file: env_config = json.load(env_config_file) else: raise FileExistsError(\"The env config file does not", "== \"generic_access_token\": dict1[\"headers\"][\"authorization\"] = ( dict1[\"headers\"][\"authorization\"] + dict2[k] ) yield (k, dict1[\"headers\"][\"authorization\"]) else:", "if env not in {\"dev\", \"prod\", \"staging\", \"test\"}: raise ValueError(\"Invalid env name\") env_config_path", "k in dict1: yield (k, dict1[k]) else: if k == \"generic_access_token\": dict1[\"headers\"][\"authorization\"] =", "sys.argv[1] else: env = \"dev\" if env not in {\"dev\", \"prod\", \"staging\", \"test\"}:", "specific so that the request headers are easily generated with just changing the", "else: if k == \"generic_access_token\": dict1[\"headers\"][\"authorization\"] = ( dict1[\"headers\"][\"authorization\"] + dict2[k] ) yield", "1: env = sys.argv[1] else: env = \"dev\" if env not in {\"dev\",", ">= 1: env = sys.argv[1] else: env = \"dev\" if env not in", "else: env = \"dev\" if env not in {\"dev\", \"prod\", \"staging\", \"test\"}: raise", "isinstance(dict1[k], dict) and isinstance(dict2[k], dict): yield (k, dict(_merge_configs(dict1[k], dict2[k]))) else: yield (k, dict2[k])", "token per environment. 
\"\"\" with open(\"./config.json\", \"r\") as config_file: default_config = json.load(config_file) if", "_merge_configs(dict1, dict2): for k in set(dict1.keys()).union(dict2.keys()): if k in dict1 and k in", "dict1[\"headers\"][\"authorization\"] + dict2[k] ) yield (k, dict1[\"headers\"][\"authorization\"]) else: yield (k, dict2[k]) config =", "the implementation for the generic access token was made specific so that the", "<filename>shorty/config/parser.py import json import sys from pathlib import Path def create_config(): \"\"\" Creates", "with open(\"./config.json\", \"r\") as config_file: default_config = json.load(config_file) if len(sys.argv) >= 1: env", "set(dict1.keys()).union(dict2.keys()): if k in dict1 and k in dict2: if isinstance(dict1[k], dict) and", "Creates a merged dictionary between the `config.json` and the respective environment's config, by", "\"prod\", \"staging\", \"test\"}: raise ValueError(\"Invalid env name\") env_config_path = Path(\"./config.{}.json\".format(env)) if env_config_path.exists(): with", "dict1[\"headers\"][\"authorization\"] = ( dict1[\"headers\"][\"authorization\"] + dict2[k] ) yield (k, dict1[\"headers\"][\"authorization\"]) else: yield (k,", "elif k in dict1: yield (k, dict1[k]) else: if k == \"generic_access_token\": dict1[\"headers\"][\"authorization\"]", "Sidenote: the implementation for the generic access token was made specific so that", "isinstance(dict2[k], dict): yield (k, dict(_merge_configs(dict1[k], dict2[k]))) else: yield (k, dict2[k]) elif k in", "config_file: default_config = json.load(config_file) if len(sys.argv) >= 1: env = sys.argv[1] else: env", "merged dictionary between the `config.json` and the respective environment's config, by merging them.", "if env_config_path.exists(): with open(env_config_path) as env_config_file: env_config = json.load(env_config_file) else: raise FileExistsError(\"The env", "env_config = json.load(env_config_file) else: raise FileExistsError(\"The env config file does not exist\") return", "the respective environment's config, by merging them. Sidenote: the implementation for the generic", "Path(\"./config.{}.json\".format(env)) if env_config_path.exists(): with open(env_config_path) as env_config_file: env_config = json.load(env_config_file) else: raise FileExistsError(\"The", "return dict(_merge_configs(default_config, env_config)) def _merge_configs(dict1, dict2): for k in set(dict1.keys()).union(dict2.keys()): if k in", "yield (k, dict1[k]) else: if k == \"generic_access_token\": dict1[\"headers\"][\"authorization\"] = ( dict1[\"headers\"][\"authorization\"] +", "for the generic access token was made specific so that the request headers", "raise FileExistsError(\"The env config file does not exist\") return dict(_merge_configs(default_config, env_config)) def _merge_configs(dict1,", "implementation for the generic access token was made specific so that the request", "if k == \"generic_access_token\": dict1[\"headers\"][\"authorization\"] = ( dict1[\"headers\"][\"authorization\"] + dict2[k] ) yield (k,", "them. Sidenote: the implementation for the generic access token was made specific so", "( dict1[\"headers\"][\"authorization\"] + dict2[k] ) yield (k, dict1[\"headers\"][\"authorization\"]) else: yield (k, dict2[k]) config", "environment's config, by merging them. Sidenote: the implementation for the generic access token", "dictionary between the `config.json` and the respective environment's config, by merging them. Sidenote:", "config, by merging them. 
import json
import sys
from pathlib import Path


def create_config():
    """
    Creates a merged dictionary between the `config.json` and the
    respective environment's config, by merging them.

    Sidenote: the implementation for the generic access token was made
    specific so that the request headers are easily generated with just
    changing the access token per environment.
    """
    with open("./config.json", "r") as config_file:
        default_config = json.load(config_file)
    # argv[0] is the script name, so an explicit env argument means argv
    # has at least two entries.
    if len(sys.argv) >= 2:
        env = sys.argv[1]
    else:
        env = "dev"
    if env not in {"dev", "prod", "staging", "test"}:
        raise ValueError("Invalid env name")
    env_config_path = Path("./config.{}.json".format(env))
    if env_config_path.exists():
        with open(env_config_path) as env_config_file:
            env_config = json.load(env_config_file)
    else:
        raise FileNotFoundError("The env config file does not exist")
    return dict(_merge_configs(default_config, env_config))


def _merge_configs(dict1, dict2):
    for k in set(dict1.keys()).union(dict2.keys()):
        if k in dict1 and k in dict2:
            if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
                yield (k, dict(_merge_configs(dict1[k], dict2[k])))
            else:
                yield (k, dict2[k])
        elif k in dict1:
            yield (k, dict1[k])
        else:
            if k == "generic_access_token":
                # Append the env-specific token to the shared authorization
                # header so request headers need no further assembly.
                dict1["headers"]["authorization"] = (
                    dict1["headers"]["authorization"] + dict2[k]
                )
                yield (k, dict1["headers"]["authorization"])
            else:
                yield (k, dict2[k])
[ "torch.ceil(T * valid_ratio).long() - 1 valid_hf.append(holistic_feat[i, valid_step, :]) valid_hf = torch.stack(valid_hf, dim=0) else:", "valid_ratio in enumerate(valid_ratios): # use torch.ceil to replace original math.ceil and if else", "if else in mmocr valid_step = torch.ceil(T * valid_ratio).long() - 1 valid_hf.append(holistic_feat[i, valid_step,", "this function to: 1. convert tuple value of feat.size to int, making model", "output from SAREncoder. The shape [N, M]. \"\"\" if img_metas is not None:", "holistic_feat = self.rnn_encoder(feat_v)[0] # bsz * T * C if valid_ratios is not", "making model exportable. 2. use torch.ceil to replace original math.ceil and if else", "bsz * W * C holistic_feat = self.rnn_encoder(feat_v)[0] # bsz * T *", "torch.nn.functional as F from mmdeploy.core import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter( func_name='mmocr.models.textrecog.encoders.SAREncoder.forward', backend='default') def sar_encoder__forward(ctx, self,", "and if else in mmocr. Args: ctx (ContextCaller): The context with additional information.", "where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape',", "to replace original math.ceil and if else in mmocr valid_step = torch.ceil(T *", "else in mmocr valid_step = torch.ceil(T * valid_ratio).long() - 1 valid_hf.append(holistic_feat[i, valid_step, :])", "torch import torch.nn.functional as F from mmdeploy.core import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter( func_name='mmocr.models.textrecog.encoders.SAREncoder.forward', backend='default') def", "Rewrite this function to: 1. convert tuple value of feat.size to int, making", "* C * W feat_v = feat_v.permute(0, 2, 1).contiguous() # bsz * W", "* C if valid_ratios is not None: valid_hf = [] T = holistic_feat.size(1)", "= feat_v.permute(0, 2, 1).contiguous() # bsz * W * C holistic_feat = self.rnn_encoder(feat_v)[0]", "int(feat.size(2)) feat_v = F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1, padding=0) feat_v = feat_v.squeeze(2) # bsz", "= feat_v.squeeze(2) # bsz * C * W feat_v = feat_v.permute(0, 2, 1).contiguous()", "mmdeploy.core import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter( func_name='mmocr.models.textrecog.encoders.SAREncoder.forward', backend='default') def sar_encoder__forward(ctx, self, feat, img_metas=None): \"\"\"Rewrite `forward`", "1. convert tuple value of feat.size to int, making model exportable. 2. use", "The shape [N, M]. \"\"\" if img_metas is not None: assert utils.is_type_list(img_metas, dict)", "values of these keys, see :class:`mmdet.datasets.pipelines.Collect`. Returns: holistic_feat (Tensor): A feature map output", "None if img_metas is not None: valid_ratios = [ img_meta.get('valid_ratio', 1.0) for img_meta", "For details on the values of these keys, see :class:`mmdet.datasets.pipelines.Collect`. Returns: holistic_feat (Tensor):", "# bsz * C * W feat_v = feat_v.permute(0, 2, 1).contiguous() # bsz", "import mmocr.utils as utils import torch import torch.nn.functional as F from mmdeploy.core import", "shape [N, M]. 
\"\"\" if img_metas is not None: assert utils.is_type_list(img_metas, dict) assert", "holistic_feat.size(1) for i, valid_ratio in enumerate(valid_ratios): # use torch.ceil to replace original math.ceil", "= holistic_feat.size(1) for i, valid_ratio in enumerate(valid_ratios): # use torch.ceil to replace original", "* valid_ratio).long() - 1 valid_hf.append(holistic_feat[i, valid_step, :]) valid_hf = torch.stack(valid_hf, dim=0) else: valid_hf", "valid_step, :]) valid_hf = torch.stack(valid_hf, dim=0) else: valid_hf = holistic_feat[:, -1, :] #", "# use torch.ceil to replace original math.ceil and if else in mmocr valid_step", "= self.rnn_encoder(feat_v)[0] # bsz * T * C if valid_ratios is not None:", "if self.mask else None h_feat = int(feat.size(2)) feat_v = F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1,", "(Tensor): Encoded feature map of shape (N, C, H, W). img_metas (Optional[list[dict]]): A", "valid_ratios is not None: valid_hf = [] T = holistic_feat.size(1) for i, valid_ratio", "info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain", "= [] T = holistic_feat.size(1) for i, valid_ratio in enumerate(valid_ratios): # use torch.ceil", "rights reserved. import mmocr.utils as utils import torch import torch.nn.functional as F from", "2. use torch.ceil to replace original math.ceil and if else in mmocr. Args:", "def sar_encoder__forward(ctx, self, feat, img_metas=None): \"\"\"Rewrite `forward` of SAREncoder for default backend. Rewrite", "feature map output from SAREncoder. The shape [N, M]. \"\"\" if img_metas is", "== feat.size(0) valid_ratios = None if img_metas is not None: valid_ratios = [", "if img_metas is not None: valid_ratios = [ img_meta.get('valid_ratio', 1.0) for img_meta in", ":] # bsz * C holistic_feat = self.linear(valid_hf) # bsz * C return", "of shape (N, C, H, W). img_metas (Optional[list[dict]]): A list of image info", "img_metas (Optional[list[dict]]): A list of image info dict where each dict has: 'img_shape',", "to: 1. convert tuple value of feat.size to int, making model exportable. 2.", "class SAREncoder. feat (Tensor): Encoded feature map of shape (N, C, H, W).", "The instance of the class SAREncoder. feat (Tensor): Encoded feature map of shape", "# bsz * C holistic_feat = self.linear(valid_hf) # bsz * C return holistic_feat", "and if else in mmocr valid_step = torch.ceil(T * valid_ratio).long() - 1 valid_hf.append(holistic_feat[i,", "C * W feat_v = feat_v.permute(0, 2, 1).contiguous() # bsz * W *", "function to: 1. convert tuple value of feat.size to int, making model exportable.", "in mmocr valid_step = torch.ceil(T * valid_ratio).long() - 1 valid_hf.append(holistic_feat[i, valid_step, :]) valid_hf", "the values of these keys, see :class:`mmdet.datasets.pipelines.Collect`. Returns: holistic_feat (Tensor): A feature map", "@FUNCTION_REWRITER.register_rewriter( func_name='mmocr.models.textrecog.encoders.SAREncoder.forward', backend='default') def sar_encoder__forward(ctx, self, feat, img_metas=None): \"\"\"Rewrite `forward` of SAREncoder for", "use torch.ceil to replace original math.ceil and if else in mmocr valid_step =", "replace original math.ceil and if else in mmocr. 
Args: ctx (ContextCaller): The context", ":]) valid_hf = torch.stack(valid_hf, dim=0) else: valid_hf = holistic_feat[:, -1, :] # bsz", "import torch import torch.nn.functional as F from mmdeploy.core import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter( func_name='mmocr.models.textrecog.encoders.SAREncoder.forward', backend='default')", "feat.size to int, making model exportable. 2. use torch.ceil to replace original math.ceil", "if img_metas is not None: assert utils.is_type_list(img_metas, dict) assert len(img_metas) == feat.size(0) valid_ratios", "for img_meta in img_metas ] if self.mask else None h_feat = int(feat.size(2)) feat_v", "feat_v = feat_v.permute(0, 2, 1).contiguous() # bsz * W * C holistic_feat =", "OpenMMLab. All rights reserved. import mmocr.utils as utils import torch import torch.nn.functional as", "of the class SAREncoder. feat (Tensor): Encoded feature map of shape (N, C,", "C, H, W). img_metas (Optional[list[dict]]): A list of image info dict where each", "A list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip',", "(Tensor): A feature map output from SAREncoder. The shape [N, M]. \"\"\" if", "h_feat = int(feat.size(2)) feat_v = F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1, padding=0) feat_v = feat_v.squeeze(2)", "the class SAREncoder. feat (Tensor): Encoded feature map of shape (N, C, H,", "math.ceil and if else in mmocr. Args: ctx (ContextCaller): The context with additional", "assert utils.is_type_list(img_metas, dict) assert len(img_metas) == feat.size(0) valid_ratios = None if img_metas is", "holistic_feat (Tensor): A feature map output from SAREncoder. The shape [N, M]. \"\"\"", "Encoded feature map of shape (N, C, H, W). img_metas (Optional[list[dict]]): A list", "these keys, see :class:`mmdet.datasets.pipelines.Collect`. Returns: holistic_feat (Tensor): A feature map output from SAREncoder.", "= [ img_meta.get('valid_ratio', 1.0) for img_meta in img_metas ] if self.mask else None", "use torch.ceil to replace original math.ceil and if else in mmocr. Args: ctx", ":class:`mmdet.datasets.pipelines.Collect`. Returns: holistic_feat (Tensor): A feature map output from SAREncoder. The shape [N,", "H, W). img_metas (Optional[list[dict]]): A list of image info dict where each dict", "additional information. self: The instance of the class SAREncoder. feat (Tensor): Encoded feature", "Args: ctx (ContextCaller): The context with additional information. self: The instance of the", "context with additional information. self: The instance of the class SAREncoder. feat (Tensor):", "original math.ceil and if else in mmocr valid_step = torch.ceil(T * valid_ratio).long() -", "details on the values of these keys, see :class:`mmdet.datasets.pipelines.Collect`. Returns: holistic_feat (Tensor): A", "reserved. import mmocr.utils as utils import torch import torch.nn.functional as F from mmdeploy.core", "* W * C holistic_feat = self.rnn_encoder(feat_v)[0] # bsz * T * C", "valid_ratio).long() - 1 valid_hf.append(holistic_feat[i, valid_step, :]) valid_hf = torch.stack(valid_hf, dim=0) else: valid_hf =", "'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys, see", "feat_v.permute(0, 2, 1).contiguous() # bsz * W * C holistic_feat = self.rnn_encoder(feat_v)[0] #", "2, 1).contiguous() # bsz * W * C holistic_feat = self.rnn_encoder(feat_v)[0] # bsz", "is not None: assert utils.is_type_list(img_metas, dict) assert len(img_metas) == feat.size(0) valid_ratios = None", "of feat.size to int, making model exportable. 2. 
use torch.ceil to replace original", "'img_norm_cfg'. For details on the values of these keys, see :class:`mmdet.datasets.pipelines.Collect`. Returns: holistic_feat", "list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and", "Returns: holistic_feat (Tensor): A feature map output from SAREncoder. The shape [N, M].", "self.mask else None h_feat = int(feat.size(2)) feat_v = F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1, padding=0)", "exportable. 2. use torch.ceil to replace original math.ceil and if else in mmocr.", "Copyright (c) OpenMMLab. All rights reserved. import mmocr.utils as utils import torch import", "T = holistic_feat.size(1) for i, valid_ratio in enumerate(valid_ratios): # use torch.ceil to replace", "'pad_shape', and 'img_norm_cfg'. For details on the values of these keys, see :class:`mmdet.datasets.pipelines.Collect`.", "valid_hf = holistic_feat[:, -1, :] # bsz * C holistic_feat = self.linear(valid_hf) #", "(N, C, H, W). img_metas (Optional[list[dict]]): A list of image info dict where", "# bsz * W * C holistic_feat = self.rnn_encoder(feat_v)[0] # bsz * T", "with additional information. self: The instance of the class SAREncoder. feat (Tensor): Encoded", "bsz * C * W feat_v = feat_v.permute(0, 2, 1).contiguous() # bsz *", "valid_ratios = None if img_metas is not None: valid_ratios = [ img_meta.get('valid_ratio', 1.0)", "is not None: valid_hf = [] T = holistic_feat.size(1) for i, valid_ratio in", "dim=0) else: valid_hf = holistic_feat[:, -1, :] # bsz * C holistic_feat =", "not None: assert utils.is_type_list(img_metas, dict) assert len(img_metas) == feat.size(0) valid_ratios = None if", "self, feat, img_metas=None): \"\"\"Rewrite `forward` of SAREncoder for default backend. Rewrite this function", "enumerate(valid_ratios): # use torch.ceil to replace original math.ceil and if else in mmocr", "feat_v.squeeze(2) # bsz * C * W feat_v = feat_v.permute(0, 2, 1).contiguous() #", "model exportable. 2. use torch.ceil to replace original math.ceil and if else in", "\"\"\" if img_metas is not None: assert utils.is_type_list(img_metas, dict) assert len(img_metas) == feat.size(0)", "information. self: The instance of the class SAREncoder. feat (Tensor): Encoded feature map", "and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the", "'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For", "to int, making model exportable. 2. use torch.ceil to replace original math.ceil and", "not None: valid_ratios = [ img_meta.get('valid_ratio', 1.0) for img_meta in img_metas ] if", "F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1, padding=0) feat_v = feat_v.squeeze(2) # bsz * C *", "see :class:`mmdet.datasets.pipelines.Collect`. Returns: holistic_feat (Tensor): A feature map output from SAREncoder. The shape", "in mmocr. Args: ctx (ContextCaller): The context with additional information. self: The instance", "also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of", "assert len(img_metas) == feat.size(0) valid_ratios = None if img_metas is not None: valid_ratios", "in enumerate(valid_ratios): # use torch.ceil to replace original math.ceil and if else in", "torch.stack(valid_hf, dim=0) else: valid_hf = holistic_feat[:, -1, :] # bsz * C holistic_feat", "A feature map output from SAREncoder. The shape [N, M]. \"\"\" if img_metas", "of SAREncoder for default backend. Rewrite this function to: 1. 
convert tuple value", "else None h_feat = int(feat.size(2)) feat_v = F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1, padding=0) feat_v", "i, valid_ratio in enumerate(valid_ratios): # use torch.ceil to replace original math.ceil and if", "1.0) for img_meta in img_metas ] if self.mask else None h_feat = int(feat.size(2))", "# Copyright (c) OpenMMLab. All rights reserved. import mmocr.utils as utils import torch", "holistic_feat[:, -1, :] # bsz * C holistic_feat = self.linear(valid_hf) # bsz *", "keys, see :class:`mmdet.datasets.pipelines.Collect`. Returns: holistic_feat (Tensor): A feature map output from SAREncoder. The", "import torch.nn.functional as F from mmdeploy.core import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter( func_name='mmocr.models.textrecog.encoders.SAREncoder.forward', backend='default') def sar_encoder__forward(ctx,", "None: valid_hf = [] T = holistic_feat.size(1) for i, valid_ratio in enumerate(valid_ratios): #", "mmocr.utils as utils import torch import torch.nn.functional as F from mmdeploy.core import FUNCTION_REWRITER", "valid_hf = [] T = holistic_feat.size(1) for i, valid_ratio in enumerate(valid_ratios): # use", "All rights reserved. import mmocr.utils as utils import torch import torch.nn.functional as F", "default backend. Rewrite this function to: 1. convert tuple value of feat.size to", "'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on", "valid_ratios = [ img_meta.get('valid_ratio', 1.0) for img_meta in img_metas ] if self.mask else", "as F from mmdeploy.core import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter( func_name='mmocr.models.textrecog.encoders.SAREncoder.forward', backend='default') def sar_encoder__forward(ctx, self, feat,", "original math.ceil and if else in mmocr. Args: ctx (ContextCaller): The context with", "not None: valid_hf = [] T = holistic_feat.size(1) for i, valid_ratio in enumerate(valid_ratios):", "None: valid_ratios = [ img_meta.get('valid_ratio', 1.0) for img_meta in img_metas ] if self.mask", "W * C holistic_feat = self.rnn_encoder(feat_v)[0] # bsz * T * C if", "[ img_meta.get('valid_ratio', 1.0) for img_meta in img_metas ] if self.mask else None h_feat", "has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.", "self: The instance of the class SAREncoder. feat (Tensor): Encoded feature map of", "contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these", "map output from SAREncoder. The shape [N, M]. \"\"\" if img_metas is not", "utils.is_type_list(img_metas, dict) assert len(img_metas) == feat.size(0) valid_ratios = None if img_metas is not", "W feat_v = feat_v.permute(0, 2, 1).contiguous() # bsz * W * C holistic_feat", "= holistic_feat[:, -1, :] # bsz * C holistic_feat = self.linear(valid_hf) # bsz", "[] T = holistic_feat.size(1) for i, valid_ratio in enumerate(valid_ratios): # use torch.ceil to", "The context with additional information. self: The instance of the class SAREncoder. feat", "mmocr. Args: ctx (ContextCaller): The context with additional information. self: The instance of", "of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may", "backend. Rewrite this function to: 1. convert tuple value of feat.size to int,", "feat (Tensor): Encoded feature map of shape (N, C, H, W). img_metas (Optional[list[dict]]):", "shape (N, C, H, W). 
img_metas (Optional[list[dict]]): A list of image info dict", "kernel_size=(h_feat, 1), stride=1, padding=0) feat_v = feat_v.squeeze(2) # bsz * C * W", "bsz * T * C if valid_ratios is not None: valid_hf = []", "= torch.stack(valid_hf, dim=0) else: valid_hf = holistic_feat[:, -1, :] # bsz * C", "img_meta in img_metas ] if self.mask else None h_feat = int(feat.size(2)) feat_v =", "len(img_metas) == feat.size(0) valid_ratios = None if img_metas is not None: valid_ratios =", "to replace original math.ceil and if else in mmocr. Args: ctx (ContextCaller): The", "(ContextCaller): The context with additional information. self: The instance of the class SAREncoder.", "each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape',", "None h_feat = int(feat.size(2)) feat_v = F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1, padding=0) feat_v =", "sar_encoder__forward(ctx, self, feat, img_metas=None): \"\"\"Rewrite `forward` of SAREncoder for default backend. Rewrite this", "replace original math.ceil and if else in mmocr valid_step = torch.ceil(T * valid_ratio).long()", "valid_hf.append(holistic_feat[i, valid_step, :]) valid_hf = torch.stack(valid_hf, dim=0) else: valid_hf = holistic_feat[:, -1, :]", "dict) assert len(img_metas) == feat.size(0) valid_ratios = None if img_metas is not None:", "feat, img_metas=None): \"\"\"Rewrite `forward` of SAREncoder for default backend. Rewrite this function to:", "None: assert utils.is_type_list(img_metas, dict) assert len(img_metas) == feat.size(0) valid_ratios = None if img_metas", "- 1 valid_hf.append(holistic_feat[i, valid_step, :]) valid_hf = torch.stack(valid_hf, dim=0) else: valid_hf = holistic_feat[:,", "in img_metas ] if self.mask else None h_feat = int(feat.size(2)) feat_v = F.max_pool2d(feat,", "feat_v = F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1, padding=0) feat_v = feat_v.squeeze(2) # bsz *", "valid_hf = torch.stack(valid_hf, dim=0) else: valid_hf = holistic_feat[:, -1, :] # bsz *", "from mmdeploy.core import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter( func_name='mmocr.models.textrecog.encoders.SAREncoder.forward', backend='default') def sar_encoder__forward(ctx, self, feat, img_metas=None): \"\"\"Rewrite", "func_name='mmocr.models.textrecog.encoders.SAREncoder.forward', backend='default') def sar_encoder__forward(ctx, self, feat, img_metas=None): \"\"\"Rewrite `forward` of SAREncoder for default", "\"\"\"Rewrite `forward` of SAREncoder for default backend. Rewrite this function to: 1. convert", "torch.ceil to replace original math.ceil and if else in mmocr. Args: ctx (ContextCaller):", "backend='default') def sar_encoder__forward(ctx, self, feat, img_metas=None): \"\"\"Rewrite `forward` of SAREncoder for default backend.", "convert tuple value of feat.size to int, making model exportable. 2. use torch.ceil", "dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename',", "] if self.mask else None h_feat = int(feat.size(2)) feat_v = F.max_pool2d(feat, kernel_size=(h_feat, 1),", "= F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1, padding=0) feat_v = feat_v.squeeze(2) # bsz * C", "1), stride=1, padding=0) feat_v = feat_v.squeeze(2) # bsz * C * W feat_v", "of these keys, see :class:`mmdet.datasets.pipelines.Collect`. 
Returns: holistic_feat (Tensor): A feature map output from", "torch.ceil to replace original math.ceil and if else in mmocr valid_step = torch.ceil(T", "= None if img_metas is not None: valid_ratios = [ img_meta.get('valid_ratio', 1.0) for", "self.rnn_encoder(feat_v)[0] # bsz * T * C if valid_ratios is not None: valid_hf", "if else in mmocr. Args: ctx (ContextCaller): The context with additional information. self:", "SAREncoder. The shape [N, M]. \"\"\" if img_metas is not None: assert utils.is_type_list(img_metas,", "SAREncoder. feat (Tensor): Encoded feature map of shape (N, C, H, W). img_metas", "1).contiguous() # bsz * W * C holistic_feat = self.rnn_encoder(feat_v)[0] # bsz *", "as utils import torch import torch.nn.functional as F from mmdeploy.core import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter(", "* C holistic_feat = self.rnn_encoder(feat_v)[0] # bsz * T * C if valid_ratios", "else: valid_hf = holistic_feat[:, -1, :] # bsz * C holistic_feat = self.linear(valid_hf)", "int, making model exportable. 2. use torch.ceil to replace original math.ceil and if", "-1, :] # bsz * C holistic_feat = self.linear(valid_hf) # bsz * C", "value of feat.size to int, making model exportable. 2. use torch.ceil to replace", "= torch.ceil(T * valid_ratio).long() - 1 valid_hf.append(holistic_feat[i, valid_step, :]) valid_hf = torch.stack(valid_hf, dim=0)", "* T * C if valid_ratios is not None: valid_hf = [] T", "C if valid_ratios is not None: valid_hf = [] T = holistic_feat.size(1) for", "'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details", "else in mmocr. Args: ctx (ContextCaller): The context with additional information. self: The", "T * C if valid_ratios is not None: valid_hf = [] T =", "(c) OpenMMLab. All rights reserved. import mmocr.utils as utils import torch import torch.nn.functional", "is not None: valid_ratios = [ img_meta.get('valid_ratio', 1.0) for img_meta in img_metas ]", "if valid_ratios is not None: valid_hf = [] T = holistic_feat.size(1) for i,", "= int(feat.size(2)) feat_v = F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1, padding=0) feat_v = feat_v.squeeze(2) #", "feat_v = feat_v.squeeze(2) # bsz * C * W feat_v = feat_v.permute(0, 2,", "img_metas=None): \"\"\"Rewrite `forward` of SAREncoder for default backend. Rewrite this function to: 1.", "map of shape (N, C, H, W). img_metas (Optional[list[dict]]): A list of image", "M]. \"\"\" if img_metas is not None: assert utils.is_type_list(img_metas, dict) assert len(img_metas) ==", "img_metas is not None: assert utils.is_type_list(img_metas, dict) assert len(img_metas) == feat.size(0) valid_ratios =", "SAREncoder for default backend. Rewrite this function to: 1. convert tuple value of", "[N, M]. \"\"\" if img_metas is not None: assert utils.is_type_list(img_metas, dict) assert len(img_metas)", "valid_step = torch.ceil(T * valid_ratio).long() - 1 valid_hf.append(holistic_feat[i, valid_step, :]) valid_hf = torch.stack(valid_hf,", "mmocr valid_step = torch.ceil(T * valid_ratio).long() - 1 valid_hf.append(holistic_feat[i, valid_step, :]) valid_hf =", "feature map of shape (N, C, H, W). img_metas (Optional[list[dict]]): A list of", "ctx (ContextCaller): The context with additional information. self: The instance of the class", "for i, valid_ratio in enumerate(valid_ratios): # use torch.ceil to replace original math.ceil and", "'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. 
For details on the values of these keys,", "FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter( func_name='mmocr.models.textrecog.encoders.SAREncoder.forward', backend='default') def sar_encoder__forward(ctx, self, feat, img_metas=None): \"\"\"Rewrite `forward` of SAREncoder", "C holistic_feat = self.rnn_encoder(feat_v)[0] # bsz * T * C if valid_ratios is", "import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter( func_name='mmocr.models.textrecog.encoders.SAREncoder.forward', backend='default') def sar_encoder__forward(ctx, self, feat, img_metas=None): \"\"\"Rewrite `forward` of", "and 'img_norm_cfg'. For details on the values of these keys, see :class:`mmdet.datasets.pipelines.Collect`. Returns:", "on the values of these keys, see :class:`mmdet.datasets.pipelines.Collect`. Returns: holistic_feat (Tensor): A feature", "* W feat_v = feat_v.permute(0, 2, 1).contiguous() # bsz * W * C", "(Optional[list[dict]]): A list of image info dict where each dict has: 'img_shape', 'scale_factor',", "instance of the class SAREncoder. feat (Tensor): Encoded feature map of shape (N,", "W). img_metas (Optional[list[dict]]): A list of image info dict where each dict has:", "F from mmdeploy.core import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter( func_name='mmocr.models.textrecog.encoders.SAREncoder.forward', backend='default') def sar_encoder__forward(ctx, self, feat, img_metas=None):", "from SAREncoder. The shape [N, M]. \"\"\" if img_metas is not None: assert", "stride=1, padding=0) feat_v = feat_v.squeeze(2) # bsz * C * W feat_v =", "tuple value of feat.size to int, making model exportable. 2. use torch.ceil to", "img_metas is not None: valid_ratios = [ img_meta.get('valid_ratio', 1.0) for img_meta in img_metas", "# bsz * T * C if valid_ratios is not None: valid_hf =", "padding=0) feat_v = feat_v.squeeze(2) # bsz * C * W feat_v = feat_v.permute(0,", "dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and", "may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values", "for default backend. Rewrite this function to: 1. convert tuple value of feat.size", "`forward` of SAREncoder for default backend. Rewrite this function to: 1. convert tuple", "image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also", "utils import torch import torch.nn.functional as F from mmdeploy.core import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter( func_name='mmocr.models.textrecog.encoders.SAREncoder.forward',", "img_meta.get('valid_ratio', 1.0) for img_meta in img_metas ] if self.mask else None h_feat =", "img_metas ] if self.mask else None h_feat = int(feat.size(2)) feat_v = F.max_pool2d(feat, kernel_size=(h_feat,", "feat.size(0) valid_ratios = None if img_metas is not None: valid_ratios = [ img_meta.get('valid_ratio',", "1 valid_hf.append(holistic_feat[i, valid_step, :]) valid_hf = torch.stack(valid_hf, dim=0) else: valid_hf = holistic_feat[:, -1,", "math.ceil and if else in mmocr valid_step = torch.ceil(T * valid_ratio).long() - 1" ]
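To make the exportability point concrete, here is a minimal, self-contained sketch of the valid-ratio step selection with a toy batch; the shapes and ratio values are invented. One caveat: during tracing, size(1) is already a tensor, but in eager mode it is a plain int, so this sketch wraps the product in torch.tensor before calling torch.ceil.

import torch

# Toy stand-ins; in the rewriter above, holistic_feat comes from the RNN
# encoder and valid_ratios from the image metas.
holistic_feat = torch.randn(2, 10, 512)  # bsz * T * C
valid_ratios = [0.5, 1.0]

T = holistic_feat.size(1)
valid_hf = []
for i, valid_ratio in enumerate(valid_ratios):
    # torch.ceil keeps the index computation inside the exported graph,
    # where math.ceil plus Python branching would be baked in as constants.
    valid_step = torch.ceil(torch.tensor(T * valid_ratio)).long() - 1
    valid_hf.append(holistic_feat[i, valid_step, :])
valid_hf = torch.stack(valid_hf, dim=0)
print(valid_hf.shape)  # torch.Size([2, 512])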
[ "self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_text(root) self.assertEqual(t, expected) def test_basic1(self): self.backcvt(\"\", \"\")", "def test_basic4(self): self.backcvt(\"{{\", \"{{\") def test_title1(self): self.backcvt(\"== T1 ==\\nxyz\\n\", \"\\n== T1 ==\\n\\nxyz\\n\") def", "test_tablecaption1(self): self.backcvt(\"{|\\n|+\\ncapt\\n|}\", \"\\n{| \\n\\n|+ \\n\\ncapt\\n\\n|}\\n\") def test_tablerowcell1(self): self.backcvt(\"{|\\n|- a=1\\n| cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n|", "self.assertEqual(ctx.warnings, []) t = ctx.node_to_text(root) self.assertEqual(t, expected) def test_basic1(self): self.backcvt(\"\", \"\") def test_basic2(self):", "test_basic1(self): self.backcvt(\"\", \"\") def test_basic2(self): self.backcvt(\"foo bar\\nxyz\\n\", \"foo bar\\nxyz\\n\") def test_basic3(self): self.backcvt(\"&amp;amp;\", \"&amp;amp;\")", "def test_title1(self): self.backcvt(\"== T1 ==\\nxyz\\n\", \"\\n== T1 ==\\n\\nxyz\\n\") def test_title2(self): self.backcvt(\"=== T1 ===\\nxyz\\n\",", "def test_basic3(self): self.backcvt(\"&amp;amp;\", \"&amp;amp;\") def test_basic4(self): self.backcvt(\"{{\", \"{{\") def test_title1(self): self.backcvt(\"== T1 ==\\nxyz\\n\",", "def test_italic1(self): self.backcvt(\"''i''\", \"''i''\") def test_bold1(self): self.backcvt(\"''b''\", \"''b''\") def test_text1(self): self.totext(\"\", \"\") def", "self.backcvt(\"{{foo|a|b|c=4|{{{arg}}}}}\", \"{{foo|a|b|c=4|{{{arg}}}}}\") def test_template2(self): self.backcvt(\"{{foo}}\", \"{{foo}}\") def test_template3(self): self.backcvt(\"{{!}}\", \"{{!}}\") def test_templatearg1(self): self.backcvt(\"{{{1}}}\",", "\"\\n{| \\n\\n|}\\n\") def test_table2(self): self.backcvt('{| class=\"x\"\\n|}', '\\n{| class=\"x\"\\n\\n|}\\n') def test_tablecaption1(self): self.backcvt(\"{|\\n|+\\ncapt\\n|}\", \"\\n{| \\n\\n|+", "\"\\n==== T1 ====\\n\\nxyz\\n\") def test_title4(self): self.backcvt(\"===== T1 =====\\nxyz\\n\", \"\\n===== T1 =====\\n\\nxyz\\n\") def test_title5(self):", "self.backcvt(\"====== T1 ======\\nxyz\\n\", \"\\n====== T1 ======\\n\\nxyz\\n\") def test_hline1(self): self.backcvt(\"aaa\\n----\\nbbbb\", \"aaa\\n\\n----\\n\\nbbbb\") def test_list1(self): self.backcvt(\"*a\\n*", "def test_html1(self): self.backcvt('a<span class=\"bar\">foo</span>b', 'a<span class=\"bar\">foo</span>b') def test_italic1(self): self.backcvt(\"''i''\", \"''i''\") def test_bold1(self): self.backcvt(\"''b''\",", "c\\n*# d\\n* b\\ndef\", \"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\") def test_list4(self): self.backcvt(\"abc\\n*a\\n**b\\n*:c\\n\", \"abc\\n*a\\n**b\\n*:c\\n\") def test_pre1(self):", "\"{{#expr: 1 + 2}}\") def test_parserfn2(self): self.backcvt(\"{{#expr:1+{{v}}}}\", \"{{#expr:1+{{v}}}}\") def test_parserfn3(self): self.backcvt(\"{{ROOTPAGENAME}}\", \"{{ROOTPAGENAME:}}\") def", "self.backcvt(\"a<b>foo</b>b\", \"a<b>foo</b>b\") def test_html1(self): self.backcvt('a<span class=\"bar\">foo</span>b', 'a<span class=\"bar\">foo</span>b') def test_italic1(self): self.backcvt(\"''i''\", \"''i''\") def", "test_magicword1(self): self.backcvt(\"a\\n__TOC__\\nb\", \"a\\n\\n__TOC__\\n\\nb\") def test_html1(self): self.backcvt(\"a<b>foo</b>b\", \"a<b>foo</b>b\") def test_html1(self): self.backcvt('a<span class=\"bar\">foo</span>b', 'a<span class=\"bar\">foo</span>b')", "NodeExpTests(unittest.TestCase): def backcvt(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings,", "self.backcvt(\"a [[foo]]s bar\", \"a [[foo]]s bar\") def 
test_template1(self): self.backcvt(\"{{foo|a|b|c=4|{{{arg}}}}}\", \"{{foo|a|b|c=4|{{{arg}}}}}\") def test_template2(self): self.backcvt(\"{{foo}}\",", "b\\n\") def test_list2(self): self.backcvt(\"abc\\n*a\\n* b\\ndef\", \"abc\\n*a\\n* b\\ndef\") def test_list3(self): self.backcvt(\"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\",", "[]) t = ctx.node_to_wikitext(root) self.assertEqual(t, expected) def tohtml(self, text, expected): root, ctx =", "\"{{foo|a|b|c=4|{{{arg}}}}}\") def test_template2(self): self.backcvt(\"{{foo}}\", \"{{foo}}\") def test_template3(self): self.backcvt(\"{{!}}\", \"{{!}}\") def test_templatearg1(self): self.backcvt(\"{{{1}}}\", \"{{{1}}}\")", "test_templatearg1(self): self.backcvt(\"{{{1}}}\", \"{{{1}}}\") def test_templatearg1(self): self.backcvt(\"{{{{{templ}}}}}\", \"{{{{{templ}}}}}\") def test_templatearg2(self): self.backcvt(\"{{{a|def}}}\", \"{{{a|def}}}\") def test_templatearg3(self):", "self.backcvt(\"{{{a|def}}}\", \"{{{a|def}}}\") def test_templatearg3(self): self.backcvt(\"{{{a|}}}\", \"{{{a|}}}\") def test_parserfn1(self): self.backcvt(\"{{#expr: 1 + 2}}\", \"{{#expr:", "def test_template2(self): self.backcvt(\"{{foo}}\", \"{{foo}}\") def test_template3(self): self.backcvt(\"{{!}}\", \"{{!}}\") def test_templatearg1(self): self.backcvt(\"{{{1}}}\", \"{{{1}}}\") def", "bar\") def test_template1(self): self.backcvt(\"{{foo|a|b|c=4|{{{arg}}}}}\", \"{{foo|a|b|c=4|{{{arg}}}}}\") def test_template2(self): self.backcvt(\"{{foo}}\", \"{{foo}}\") def test_template3(self): self.backcvt(\"{{!}}\", \"{{!}}\")", "test_text2(self): self.totext(\"\\nfoo bar \", \"foo bar\") def test_text3(self): self.totext(\"<b>foo</b>\", \"foo\") def test_text4(self): self.totext(\"<h1>foo</h1><p>bar</p>\",", "test_link1(self): self.backcvt(\"[[foo bar]]\", \"[[foo bar]]\") def test_link2(self): self.backcvt(\"[[foo|bar]]\", \"[[foo|bar]]\") def test_link3(self): self.backcvt(\"a [[foo]]s", "bar\", \"a [[foo]]s bar\") def test_template1(self): self.backcvt(\"{{foo|a|b|c=4|{{{arg}}}}}\", \"{{foo|a|b|c=4|{{{arg}}}}}\") def test_template2(self): self.backcvt(\"{{foo}}\", \"{{foo}}\") def", "\"{{#expr:1+{{v}}}}\") def test_parserfn3(self): self.backcvt(\"{{ROOTPAGENAME}}\", \"{{ROOTPAGENAME:}}\") def test_url1(self): self.backcvt(\"[https://wikipedia.org]\", \"[https://wikipedia.org]\") def test_url2(self): self.backcvt(\"https://wikipedia.org/\", \"[https://wikipedia.org/]\")", "\"\") def test_basic2(self): self.backcvt(\"foo bar\\nxyz\\n\", \"foo bar\\nxyz\\n\") def test_basic3(self): self.backcvt(\"&amp;amp;\", \"&amp;amp;\") def test_basic4(self):", "\\n\\n|- a=\"1\"\\n\\n| cell\\n\\n\\n|}\\n') def test_tablerowhdr1(self): self.backcvt(\"{|\\n|- a=1\\n! cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n! 
cell\\n\\n\\n|}\\n') def", "test_basic2(self): self.backcvt(\"foo bar\\nxyz\\n\", \"foo bar\\nxyz\\n\") def test_basic3(self): self.backcvt(\"&amp;amp;\", \"&amp;amp;\") def test_basic4(self): self.backcvt(\"{{\", \"{{\")", "T1 ======\\nxyz\\n\", \"\\n====== T1 ======\\n\\nxyz\\n\") def test_hline1(self): self.backcvt(\"aaa\\n----\\nbbbb\", \"aaa\\n\\n----\\n\\nbbbb\") def test_list1(self): self.backcvt(\"*a\\n* b\\n\",", "t = ctx.node_to_wikitext(root) self.assertEqual(t, expected) def tohtml(self, text, expected): root, ctx = parse_with_ctx(\"test\",", "assert isinstance(root, WikiNode) assert isinstance(ctx, Wtp) return root class NodeExpTests(unittest.TestCase): def backcvt(self, text,", "self.backcvt(\"{{{1}}}\", \"{{{1}}}\") def test_templatearg1(self): self.backcvt(\"{{{{{templ}}}}}\", \"{{{{{templ}}}}}\") def test_templatearg2(self): self.backcvt(\"{{{a|def}}}\", \"{{{a|def}}}\") def test_templatearg3(self): self.backcvt(\"{{{a|}}}\",", "and https://ylonen.org import unittest from wikitextprocessor import Wtp from wikitextprocessor.parser import (print_tree, NodeKind,", "b\\ndef\") def test_list4(self): self.backcvt(\"abc\\n*a\\n**b\\n*:c\\n\", \"abc\\n*a\\n**b\\n*:c\\n\") def test_pre1(self): self.backcvt(\"a<pre>foo\\n bar</pre>b\", \"a<pre>foo\\n bar</pre>b\") def test_preformatted1(self):", "= ctx.parse(text, **kwargs) print(\"parse_with_ctx: root\", type(root), root) return root, ctx def parse(title, text,", "root, ctx = parse_with_ctx(title, text, **kwargs) assert isinstance(root, WikiNode) assert isinstance(ctx, Wtp) return", "d\\n* b\\ndef\") def test_list4(self): self.backcvt(\"abc\\n*a\\n**b\\n*:c\\n\", \"abc\\n*a\\n**b\\n*:c\\n\") def test_pre1(self): self.backcvt(\"a<pre>foo\\n bar</pre>b\", \"a<pre>foo\\n bar</pre>b\") def", "ctx.node_to_html(root) self.assertEqual(t, expected) def totext(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors,", "root\", type(root), root) return root, ctx def parse(title, text, **kwargs): root, ctx =", "self.assertEqual(ctx.warnings, []) t = ctx.node_to_wikitext(root) self.assertEqual(t, expected) def tohtml(self, text, expected): root, ctx", "ctx = Wtp() ctx.analyze_templates() ctx.start_page(title) root = ctx.parse(text, **kwargs) print(\"parse_with_ctx: root\", type(root), root)", "self.backcvt(\"*a\\n* b\\n\", \"*a\\n* b\\n\") def test_list2(self): self.backcvt(\"abc\\n*a\\n* b\\ndef\", \"abc\\n*a\\n* b\\ndef\") def test_list3(self): self.backcvt(\"abc\\n*a\\n*#", "parse_with_ctx(title, text, **kwargs): assert isinstance(title, str) assert isinstance(text, str) ctx = Wtp() ctx.analyze_templates()", "test_hline1(self): self.backcvt(\"aaa\\n----\\nbbbb\", \"aaa\\n\\n----\\n\\nbbbb\") def test_list1(self): self.backcvt(\"*a\\n* b\\n\", \"*a\\n* b\\n\") def test_list2(self): self.backcvt(\"abc\\n*a\\n* b\\ndef\",", "def test_list4(self): self.backcvt(\"abc\\n*a\\n**b\\n*:c\\n\", \"abc\\n*a\\n**b\\n*:c\\n\") def test_pre1(self): self.backcvt(\"a<pre>foo\\n bar</pre>b\", \"a<pre>foo\\n bar</pre>b\") def test_preformatted1(self): self.backcvt(\"", "def test_url1(self): self.backcvt(\"[https://wikipedia.org]\", \"[https://wikipedia.org]\") def test_url2(self): self.backcvt(\"https://wikipedia.org/\", \"[https://wikipedia.org/]\") def test_url3(self): self.backcvt(\"https://wikipedia.org/x/y?a=7%255\", \"[https://wikipedia.org/x/y?a=7%255]\") def", "ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_html(root) self.assertEqual(t, 
expected)", "def test_templatearg1(self): self.backcvt(\"{{{{{templ}}}}}\", \"{{{{{templ}}}}}\") def test_templatearg2(self): self.backcvt(\"{{{a|def}}}\", \"{{{a|def}}}\") def test_templatearg3(self): self.backcvt(\"{{{a|}}}\", \"{{{a|}}}\") def", "assert isinstance(title, str) assert isinstance(text, str) ctx = Wtp() ctx.analyze_templates() ctx.start_page(title) root =", "\" a\\n b\") def test_link1(self): self.backcvt(\"[[foo bar]]\", \"[[foo bar]]\") def test_link2(self): self.backcvt(\"[[foo|bar]]\", \"[[foo|bar]]\")", "====\\n\\nxyz\\n\") def test_title4(self): self.backcvt(\"===== T1 =====\\nxyz\\n\", \"\\n===== T1 =====\\n\\nxyz\\n\") def test_title5(self): self.backcvt(\"====== T1", "==\\n\\nxyz\\n\") def test_title2(self): self.backcvt(\"=== T1 ===\\nxyz\\n\", \"\\n=== T1 ===\\n\\nxyz\\n\") def test_title3(self): self.backcvt(\"==== T1", "test_text3(self): self.totext(\"<b>foo</b>\", \"foo\") def test_text4(self): self.totext(\"<h1>foo</h1><p>bar</p>\", \"foo\\n\\nbar\") def test_text5(self): self.totext(\"foo<ref x=1>bar</ref> z\", \"foo", "test_list2(self): self.backcvt(\"abc\\n*a\\n* b\\ndef\", \"abc\\n*a\\n* b\\ndef\") def test_list3(self): self.backcvt(\"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\", \"abc\\n*a\\n*# c\\n*#", "test_italic1(self): self.backcvt(\"''i''\", \"''i''\") def test_bold1(self): self.backcvt(\"''b''\", \"''b''\") def test_text1(self): self.totext(\"\", \"\") def test_text2(self):", "self.backcvt(\"===== T1 =====\\nxyz\\n\", \"\\n===== T1 =====\\n\\nxyz\\n\") def test_title5(self): self.backcvt(\"====== T1 ======\\nxyz\\n\", \"\\n====== T1", "test_list4(self): self.backcvt(\"abc\\n*a\\n**b\\n*:c\\n\", \"abc\\n*a\\n**b\\n*:c\\n\") def test_pre1(self): self.backcvt(\"a<pre>foo\\n bar</pre>b\", \"a<pre>foo\\n bar</pre>b\") def test_preformatted1(self): self.backcvt(\" a\\n", "1 + 2}}\", \"{{#expr: 1 + 2}}\") def test_parserfn2(self): self.backcvt(\"{{#expr:1+{{v}}}}\", \"{{#expr:1+{{v}}}}\") def test_parserfn3(self):", "b\") def test_link1(self): self.backcvt(\"[[foo bar]]\", \"[[foo bar]]\") def test_link2(self): self.backcvt(\"[[foo|bar]]\", \"[[foo|bar]]\") def test_link3(self):", "|}\", \"\\n{| \\n\\n|}\\n\") def test_table2(self): self.backcvt('{| class=\"x\"\\n|}', '\\n{| class=\"x\"\\n\\n|}\\n') def test_tablecaption1(self): self.backcvt(\"{|\\n|+\\ncapt\\n|}\", \"\\n{|", "test_parserfn3(self): self.backcvt(\"{{ROOTPAGENAME}}\", \"{{ROOTPAGENAME:}}\") def test_url1(self): self.backcvt(\"[https://wikipedia.org]\", \"[https://wikipedia.org]\") def test_url2(self): self.backcvt(\"https://wikipedia.org/\", \"[https://wikipedia.org/]\") def test_url3(self):", "\"{{ROOTPAGENAME:}}\") def test_url1(self): self.backcvt(\"[https://wikipedia.org]\", \"[https://wikipedia.org]\") def test_url2(self): self.backcvt(\"https://wikipedia.org/\", \"[https://wikipedia.org/]\") def test_url3(self): self.backcvt(\"https://wikipedia.org/x/y?a=7%255\", \"[https://wikipedia.org/x/y?a=7%255]\")", "test_link2(self): self.backcvt(\"[[foo|bar]]\", \"[[foo|bar]]\") def test_link3(self): self.backcvt(\"a [[foo]]s bar\", \"a [[foo]]s bar\") def test_template1(self):", "text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_wikitext(root) self.assertEqual(t, expected) def tohtml(self, text,", "self.totext(\"<b>foo</b>\", \"foo\") def test_text4(self): self.totext(\"<h1>foo</h1><p>bar</p>\", \"foo\\n\\nbar\") def test_text5(self): self.totext(\"foo<ref x=1>bar</ref> z\", \"foo z\")", "[[foo]]s bar\") def test_template1(self): 
self.backcvt(\"{{foo|a|b|c=4|{{{arg}}}}}\", \"{{foo|a|b|c=4|{{{arg}}}}}\") def test_template2(self): self.backcvt(\"{{foo}}\", \"{{foo}}\") def test_template3(self): self.backcvt(\"{{!}}\",", "**kwargs) assert isinstance(root, WikiNode) assert isinstance(ctx, Wtp) return root class NodeExpTests(unittest.TestCase): def backcvt(self,", "(c) 2020-2021 <NAME>. See file LICENSE and https://ylonen.org import unittest from wikitextprocessor import", "bar \", \"foo bar\") def test_text3(self): self.totext(\"<b>foo</b>\", \"foo\") def test_text4(self): self.totext(\"<h1>foo</h1><p>bar</p>\", \"foo\\n\\nbar\") def", "totext(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t", "=====\\n\\nxyz\\n\") def test_title5(self): self.backcvt(\"====== T1 ======\\nxyz\\n\", \"\\n====== T1 ======\\n\\nxyz\\n\") def test_hline1(self): self.backcvt(\"aaa\\n----\\nbbbb\", \"aaa\\n\\n----\\n\\nbbbb\")", "===\\nxyz\\n\", \"\\n=== T1 ===\\n\\nxyz\\n\") def test_title3(self): self.backcvt(\"==== T1 ====\\nxyz\\n\", \"\\n==== T1 ====\\n\\nxyz\\n\") def", "def test_templatearg3(self): self.backcvt(\"{{{a|}}}\", \"{{{a|}}}\") def test_parserfn1(self): self.backcvt(\"{{#expr: 1 + 2}}\", \"{{#expr: 1 +", "test_html1(self): self.backcvt('a<span class=\"bar\">foo</span>b', 'a<span class=\"bar\">foo</span>b') def test_italic1(self): self.backcvt(\"''i''\", \"''i''\") def test_bold1(self): self.backcvt(\"''b''\", \"''b''\")", "def test_title3(self): self.backcvt(\"==== T1 ====\\nxyz\\n\", \"\\n==== T1 ====\\n\\nxyz\\n\") def test_title4(self): self.backcvt(\"===== T1 =====\\nxyz\\n\",", "b\\n\", \"*a\\n* b\\n\") def test_list2(self): self.backcvt(\"abc\\n*a\\n* b\\ndef\", \"abc\\n*a\\n* b\\ndef\") def test_list3(self): self.backcvt(\"abc\\n*a\\n*# c\\n*#", "\"abc\\n*a\\n**b\\n*:c\\n\") def test_pre1(self): self.backcvt(\"a<pre>foo\\n bar</pre>b\", \"a<pre>foo\\n bar</pre>b\") def test_preformatted1(self): self.backcvt(\" a\\n b\", \"", "self.backcvt(\"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\", \"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\") def test_list4(self): self.backcvt(\"abc\\n*a\\n**b\\n*:c\\n\", \"abc\\n*a\\n**b\\n*:c\\n\") def", "ctx.node_to_text(root) self.assertEqual(t, expected) def test_basic1(self): self.backcvt(\"\", \"\") def test_basic2(self): self.backcvt(\"foo bar\\nxyz\\n\", \"foo bar\\nxyz\\n\")", "wikitextprocessor import Wtp from wikitextprocessor.parser import (print_tree, NodeKind, WikiNode) def parse_with_ctx(title, text, **kwargs):", "test_link3(self): self.backcvt(\"a [[foo]]s bar\", \"a [[foo]]s bar\") def test_template1(self): self.backcvt(\"{{foo|a|b|c=4|{{{arg}}}}}\", \"{{foo|a|b|c=4|{{{arg}}}}}\") def test_template2(self):", "[[foo]]s bar\", \"a [[foo]]s bar\") def test_template1(self): self.backcvt(\"{{foo|a|b|c=4|{{{arg}}}}}\", \"{{foo|a|b|c=4|{{{arg}}}}}\") def test_template2(self): self.backcvt(\"{{foo}}\", \"{{foo}}\")", "test_url3(self): self.backcvt(\"https://wikipedia.org/x/y?a=7%255\", \"[https://wikipedia.org/x/y?a=7%255]\") def test_table1(self): self.backcvt(\"{| |}\", \"\\n{| \\n\\n|}\\n\") def test_table2(self): self.backcvt('{| class=\"x\"\\n|}',", "a\\n b\") def test_link1(self): self.backcvt(\"[[foo bar]]\", \"[[foo bar]]\") def test_link2(self): self.backcvt(\"[[foo|bar]]\", \"[[foo|bar]]\") def", "\"\") def test_text2(self): self.totext(\"\\nfoo bar \", \"foo bar\") def test_text3(self): self.totext(\"<b>foo</b>\", \"foo\") def", "WikiNode) def parse_with_ctx(title, text, **kwargs): assert 
isinstance(title, str) assert isinstance(text, str) ctx =", "[]) self.assertEqual(ctx.warnings, []) t = ctx.node_to_html(root) self.assertEqual(t, expected) def totext(self, text, expected): root,", "def test_parserfn2(self): self.backcvt(\"{{#expr:1+{{v}}}}\", \"{{#expr:1+{{v}}}}\") def test_parserfn3(self): self.backcvt(\"{{ROOTPAGENAME}}\", \"{{ROOTPAGENAME:}}\") def test_url1(self): self.backcvt(\"[https://wikipedia.org]\", \"[https://wikipedia.org]\") def", "test_parserfn2(self): self.backcvt(\"{{#expr:1+{{v}}}}\", \"{{#expr:1+{{v}}}}\") def test_parserfn3(self): self.backcvt(\"{{ROOTPAGENAME}}\", \"{{ROOTPAGENAME:}}\") def test_url1(self): self.backcvt(\"[https://wikipedia.org]\", \"[https://wikipedia.org]\") def test_url2(self):", "bar]]\") def test_link2(self): self.backcvt(\"[[foo|bar]]\", \"[[foo|bar]]\") def test_link3(self): self.backcvt(\"a [[foo]]s bar\", \"a [[foo]]s bar\")", "self.backcvt(\"{{#expr: 1 + 2}}\", \"{{#expr: 1 + 2}}\") def test_parserfn2(self): self.backcvt(\"{{#expr:1+{{v}}}}\", \"{{#expr:1+{{v}}}}\") def", "ctx.node_to_wikitext(root) self.assertEqual(t, expected) def tohtml(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors,", "expected) def totext(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings,", "import (print_tree, NodeKind, WikiNode) def parse_with_ctx(title, text, **kwargs): assert isinstance(title, str) assert isinstance(text,", "\"*a\\n* b\\n\") def test_list2(self): self.backcvt(\"abc\\n*a\\n* b\\ndef\", \"abc\\n*a\\n* b\\ndef\") def test_list3(self): self.backcvt(\"abc\\n*a\\n*# c\\n*# d\\n*", "root class NodeExpTests(unittest.TestCase): def backcvt(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors,", "\\n\\n|+ \\n\\ncapt\\n\\n|}\\n\") def test_tablerowcell1(self): self.backcvt(\"{|\\n|- a=1\\n| cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n| cell\\n\\n\\n|}\\n') def test_tablerowhdr1(self):", "bar\") def test_text3(self): self.totext(\"<b>foo</b>\", \"foo\") def test_text4(self): self.totext(\"<h1>foo</h1><p>bar</p>\", \"foo\\n\\nbar\") def test_text5(self): self.totext(\"foo<ref x=1>bar</ref>", "self.backcvt(\"{|\\n|- a=1\\n! cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n! cell\\n\\n\\n|}\\n') def test_magicword1(self): self.backcvt(\"a\\n__TOC__\\nb\", \"a\\n\\n__TOC__\\n\\nb\") def test_html1(self):", "T1 =====\\nxyz\\n\", \"\\n===== T1 =====\\n\\nxyz\\n\") def test_title5(self): self.backcvt(\"====== T1 ======\\nxyz\\n\", \"\\n====== T1 ======\\n\\nxyz\\n\")", "def test_url2(self): self.backcvt(\"https://wikipedia.org/\", \"[https://wikipedia.org/]\") def test_url3(self): self.backcvt(\"https://wikipedia.org/x/y?a=7%255\", \"[https://wikipedia.org/x/y?a=7%255]\") def test_table1(self): self.backcvt(\"{| |}\", \"\\n{|", "def test_bold1(self): self.backcvt(\"''b''\", \"''b''\") def test_text1(self): self.totext(\"\", \"\") def test_text2(self): self.totext(\"\\nfoo bar \",", "test_tablerowhdr1(self): self.backcvt(\"{|\\n|- a=1\\n! cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n! cell\\n\\n\\n|}\\n') def test_magicword1(self): self.backcvt(\"a\\n__TOC__\\nb\", \"a\\n\\n__TOC__\\n\\nb\") def", "Wtp from wikitextprocessor.parser import (print_tree, NodeKind, WikiNode) def parse_with_ctx(title, text, **kwargs): assert isinstance(title,", "a=\"1\"\\n\\n! 
cell\\n\\n\\n|}\\n') def test_magicword1(self): self.backcvt(\"a\\n__TOC__\\nb\", \"a\\n\\n__TOC__\\n\\nb\") def test_html1(self): self.backcvt(\"a<b>foo</b>b\", \"a<b>foo</b>b\") def test_html1(self): self.backcvt('a<span", "\"a<pre>foo\\n bar</pre>b\") def test_preformatted1(self): self.backcvt(\" a\\n b\", \" a\\n b\") def test_link1(self): self.backcvt(\"[[foo", "self.backcvt(\"https://wikipedia.org/x/y?a=7%255\", \"[https://wikipedia.org/x/y?a=7%255]\") def test_table1(self): self.backcvt(\"{| |}\", \"\\n{| \\n\\n|}\\n\") def test_table2(self): self.backcvt('{| class=\"x\"\\n|}', '\\n{|", "2}}\") def test_parserfn2(self): self.backcvt(\"{{#expr:1+{{v}}}}\", \"{{#expr:1+{{v}}}}\") def test_parserfn3(self): self.backcvt(\"{{ROOTPAGENAME}}\", \"{{ROOTPAGENAME:}}\") def test_url1(self): self.backcvt(\"[https://wikipedia.org]\", \"[https://wikipedia.org]\")", "def test_title2(self): self.backcvt(\"=== T1 ===\\nxyz\\n\", \"\\n=== T1 ===\\n\\nxyz\\n\") def test_title3(self): self.backcvt(\"==== T1 ====\\nxyz\\n\",", "self.backcvt(\"abc\\n*a\\n* b\\ndef\", \"abc\\n*a\\n* b\\ndef\") def test_list3(self): self.backcvt(\"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\", \"abc\\n*a\\n*# c\\n*# d\\n*", "(print_tree, NodeKind, WikiNode) def parse_with_ctx(title, text, **kwargs): assert isinstance(title, str) assert isinstance(text, str)", "test_html1(self): self.backcvt(\"a<b>foo</b>b\", \"a<b>foo</b>b\") def test_html1(self): self.backcvt('a<span class=\"bar\">foo</span>b', 'a<span class=\"bar\">foo</span>b') def test_italic1(self): self.backcvt(\"''i''\", \"''i''\")", "======\\nxyz\\n\", \"\\n====== T1 ======\\n\\nxyz\\n\") def test_hline1(self): self.backcvt(\"aaa\\n----\\nbbbb\", \"aaa\\n\\n----\\n\\nbbbb\") def test_list1(self): self.backcvt(\"*a\\n* b\\n\", \"*a\\n*", "expected) def tohtml(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings,", "def test_template3(self): self.backcvt(\"{{!}}\", \"{{!}}\") def test_templatearg1(self): self.backcvt(\"{{{1}}}\", \"{{{1}}}\") def test_templatearg1(self): self.backcvt(\"{{{{{templ}}}}}\", \"{{{{{templ}}}}}\") def", "# Tests for WikiText parsing # # Copyright (c) 2020-2021 <NAME>. See file", "b\\ndef\", \"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\") def test_list4(self): self.backcvt(\"abc\\n*a\\n**b\\n*:c\\n\", \"abc\\n*a\\n**b\\n*:c\\n\") def test_pre1(self): self.backcvt(\"a<pre>foo\\n bar</pre>b\",", "parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_wikitext(root) self.assertEqual(t, expected) def tohtml(self,", "ctx = parse_with_ctx(title, text, **kwargs) assert isinstance(root, WikiNode) assert isinstance(ctx, Wtp) return root", "type(root), root) return root, ctx def parse(title, text, **kwargs): root, ctx = parse_with_ctx(title,", "\"[https://wikipedia.org]\") def test_url2(self): self.backcvt(\"https://wikipedia.org/\", \"[https://wikipedia.org/]\") def test_url3(self): self.backcvt(\"https://wikipedia.org/x/y?a=7%255\", \"[https://wikipedia.org/x/y?a=7%255]\") def test_table1(self): self.backcvt(\"{| |}\",", "t = ctx.node_to_html(root) self.assertEqual(t, expected) def totext(self, text, expected): root, ctx = parse_with_ctx(\"test\",", "def test_tablecaption1(self): self.backcvt(\"{|\\n|+\\ncapt\\n|}\", \"\\n{| \\n\\n|+ \\n\\ncapt\\n\\n|}\\n\") def test_tablerowcell1(self): self.backcvt(\"{|\\n|- a=1\\n| cell\\n|}\", '\\n{| \\n\\n|-", "parsing # # Copyright (c) 2020-2021 <NAME>. 
See file LICENSE and https://ylonen.org import", "from wikitextprocessor import Wtp from wikitextprocessor.parser import (print_tree, NodeKind, WikiNode) def parse_with_ctx(title, text,", "bar</pre>b\") def test_preformatted1(self): self.backcvt(\" a\\n b\", \" a\\n b\") def test_link1(self): self.backcvt(\"[[foo bar]]\",", "Wtp() ctx.analyze_templates() ctx.start_page(title) root = ctx.parse(text, **kwargs) print(\"parse_with_ctx: root\", type(root), root) return root,", "test_template1(self): self.backcvt(\"{{foo|a|b|c=4|{{{arg}}}}}\", \"{{foo|a|b|c=4|{{{arg}}}}}\") def test_template2(self): self.backcvt(\"{{foo}}\", \"{{foo}}\") def test_template3(self): self.backcvt(\"{{!}}\", \"{{!}}\") def test_templatearg1(self):", "\"a\\n\\n__TOC__\\n\\nb\") def test_html1(self): self.backcvt(\"a<b>foo</b>b\", \"a<b>foo</b>b\") def test_html1(self): self.backcvt('a<span class=\"bar\">foo</span>b', 'a<span class=\"bar\">foo</span>b') def test_italic1(self):", "= parse_with_ctx(title, text, **kwargs) assert isinstance(root, WikiNode) assert isinstance(ctx, Wtp) return root class", "bar</pre>b\", \"a<pre>foo\\n bar</pre>b\") def test_preformatted1(self): self.backcvt(\" a\\n b\", \" a\\n b\") def test_link1(self):", "\"\\n{| \\n\\n|+ \\n\\ncapt\\n\\n|}\\n\") def test_tablerowcell1(self): self.backcvt(\"{|\\n|- a=1\\n| cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n| cell\\n\\n\\n|}\\n') def", "a=1\\n! cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n! cell\\n\\n\\n|}\\n') def test_magicword1(self): self.backcvt(\"a\\n__TOC__\\nb\", \"a\\n\\n__TOC__\\n\\nb\") def test_html1(self): self.backcvt(\"a<b>foo</b>b\",", "def test_text2(self): self.totext(\"\\nfoo bar \", \"foo bar\") def test_text3(self): self.totext(\"<b>foo</b>\", \"foo\") def test_text4(self):", "# # Copyright (c) 2020-2021 <NAME>. See file LICENSE and https://ylonen.org import unittest", "ctx def parse(title, text, **kwargs): root, ctx = parse_with_ctx(title, text, **kwargs) assert isinstance(root,", "cell\\n\\n\\n|}\\n') def test_tablerowhdr1(self): self.backcvt(\"{|\\n|- a=1\\n! cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n! 
cell\\n\\n\\n|}\\n') def test_magicword1(self): self.backcvt(\"a\\n__TOC__\\nb\",", "test_url1(self): self.backcvt(\"[https://wikipedia.org]\", \"[https://wikipedia.org]\") def test_url2(self): self.backcvt(\"https://wikipedia.org/\", \"[https://wikipedia.org/]\") def test_url3(self): self.backcvt(\"https://wikipedia.org/x/y?a=7%255\", \"[https://wikipedia.org/x/y?a=7%255]\") def test_table1(self):", "self.assertEqual(t, expected) def tohtml(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, [])", "test_templatearg2(self): self.backcvt(\"{{{a|def}}}\", \"{{{a|def}}}\") def test_templatearg3(self): self.backcvt(\"{{{a|}}}\", \"{{{a|}}}\") def test_parserfn1(self): self.backcvt(\"{{#expr: 1 + 2}}\",", "T1 ======\\n\\nxyz\\n\") def test_hline1(self): self.backcvt(\"aaa\\n----\\nbbbb\", \"aaa\\n\\n----\\n\\nbbbb\") def test_list1(self): self.backcvt(\"*a\\n* b\\n\", \"*a\\n* b\\n\") def", "c\\n*# d\\n* b\\ndef\") def test_list4(self): self.backcvt(\"abc\\n*a\\n**b\\n*:c\\n\", \"abc\\n*a\\n**b\\n*:c\\n\") def test_pre1(self): self.backcvt(\"a<pre>foo\\n bar</pre>b\", \"a<pre>foo\\n bar</pre>b\")", "self.backcvt(\"&amp;amp;\", \"&amp;amp;\") def test_basic4(self): self.backcvt(\"{{\", \"{{\") def test_title1(self): self.backcvt(\"== T1 ==\\nxyz\\n\", \"\\n== T1", "def test_basic2(self): self.backcvt(\"foo bar\\nxyz\\n\", \"foo bar\\nxyz\\n\") def test_basic3(self): self.backcvt(\"&amp;amp;\", \"&amp;amp;\") def test_basic4(self): self.backcvt(\"{{\",", "bar\\nxyz\\n\") def test_basic3(self): self.backcvt(\"&amp;amp;\", \"&amp;amp;\") def test_basic4(self): self.backcvt(\"{{\", \"{{\") def test_title1(self): self.backcvt(\"== T1", "self.totext(\"\\nfoo bar \", \"foo bar\") def test_text3(self): self.totext(\"<b>foo</b>\", \"foo\") def test_text4(self): self.totext(\"<h1>foo</h1><p>bar</p>\", \"foo\\n\\nbar\")", "a\\n b\", \" a\\n b\") def test_link1(self): self.backcvt(\"[[foo bar]]\", \"[[foo bar]]\") def test_link2(self):", "import Wtp from wikitextprocessor.parser import (print_tree, NodeKind, WikiNode) def parse_with_ctx(title, text, **kwargs): assert", "\"a [[foo]]s bar\") def test_template1(self): self.backcvt(\"{{foo|a|b|c=4|{{{arg}}}}}\", \"{{foo|a|b|c=4|{{{arg}}}}}\") def test_template2(self): self.backcvt(\"{{foo}}\", \"{{foo}}\") def test_template3(self):", "\\n\\ncapt\\n\\n|}\\n\") def test_tablerowcell1(self): self.backcvt(\"{|\\n|- a=1\\n| cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n| cell\\n\\n\\n|}\\n') def test_tablerowhdr1(self): self.backcvt(\"{|\\n|-", "def test_link2(self): self.backcvt(\"[[foo|bar]]\", \"[[foo|bar]]\") def test_link3(self): self.backcvt(\"a [[foo]]s bar\", \"a [[foo]]s bar\") def", "\"{{{a|}}}\") def test_parserfn1(self): self.backcvt(\"{{#expr: 1 + 2}}\", \"{{#expr: 1 + 2}}\") def test_parserfn2(self):", "=====\\nxyz\\n\", \"\\n===== T1 =====\\n\\nxyz\\n\") def test_title5(self): self.backcvt(\"====== T1 ======\\nxyz\\n\", \"\\n====== T1 ======\\n\\nxyz\\n\") def", "'a<span class=\"bar\">foo</span>b') def test_italic1(self): self.backcvt(\"''i''\", \"''i''\") def test_bold1(self): self.backcvt(\"''b''\", \"''b''\") def test_text1(self): self.totext(\"\",", "T1 ====\\n\\nxyz\\n\") def test_title4(self): self.backcvt(\"===== T1 =====\\nxyz\\n\", \"\\n===== T1 =====\\n\\nxyz\\n\") def test_title5(self): self.backcvt(\"======", "test_table1(self): self.backcvt(\"{| |}\", \"\\n{| \\n\\n|}\\n\") def test_table2(self): self.backcvt('{| class=\"x\"\\n|}', '\\n{| class=\"x\"\\n\\n|}\\n') def test_tablecaption1(self):", 
"test_title4(self): self.backcvt(\"===== T1 =====\\nxyz\\n\", \"\\n===== T1 =====\\n\\nxyz\\n\") def test_title5(self): self.backcvt(\"====== T1 ======\\nxyz\\n\", \"\\n======", "Tests for WikiText parsing # # Copyright (c) 2020-2021 <NAME>. See file LICENSE", "return root, ctx def parse(title, text, **kwargs): root, ctx = parse_with_ctx(title, text, **kwargs)", "test_basic3(self): self.backcvt(\"&amp;amp;\", \"&amp;amp;\") def test_basic4(self): self.backcvt(\"{{\", \"{{\") def test_title1(self): self.backcvt(\"== T1 ==\\nxyz\\n\", \"\\n==", "ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_text(root) self.assertEqual(t, expected)", "def test_text3(self): self.totext(\"<b>foo</b>\", \"foo\") def test_text4(self): self.totext(\"<h1>foo</h1><p>bar</p>\", \"foo\\n\\nbar\") def test_text5(self): self.totext(\"foo<ref x=1>bar</ref> z\",", "def backcvt(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, [])", "class NodeExpTests(unittest.TestCase): def backcvt(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, [])", "self.backcvt(\" a\\n b\", \" a\\n b\") def test_link1(self): self.backcvt(\"[[foo bar]]\", \"[[foo bar]]\") def", "**kwargs): assert isinstance(title, str) assert isinstance(text, str) ctx = Wtp() ctx.analyze_templates() ctx.start_page(title) root", "[]) t = ctx.node_to_text(root) self.assertEqual(t, expected) def test_basic1(self): self.backcvt(\"\", \"\") def test_basic2(self): self.backcvt(\"foo", "self.backcvt(\"aaa\\n----\\nbbbb\", \"aaa\\n\\n----\\n\\nbbbb\") def test_list1(self): self.backcvt(\"*a\\n* b\\n\", \"*a\\n* b\\n\") def test_list2(self): self.backcvt(\"abc\\n*a\\n* b\\ndef\", \"abc\\n*a\\n*", "def test_parserfn3(self): self.backcvt(\"{{ROOTPAGENAME}}\", \"{{ROOTPAGENAME:}}\") def test_url1(self): self.backcvt(\"[https://wikipedia.org]\", \"[https://wikipedia.org]\") def test_url2(self): self.backcvt(\"https://wikipedia.org/\", \"[https://wikipedia.org/]\") def", "def test_title5(self): self.backcvt(\"====== T1 ======\\nxyz\\n\", \"\\n====== T1 ======\\n\\nxyz\\n\") def test_hline1(self): self.backcvt(\"aaa\\n----\\nbbbb\", \"aaa\\n\\n----\\n\\nbbbb\") def", "def test_magicword1(self): self.backcvt(\"a\\n__TOC__\\nb\", \"a\\n\\n__TOC__\\n\\nb\") def test_html1(self): self.backcvt(\"a<b>foo</b>b\", \"a<b>foo</b>b\") def test_html1(self): self.backcvt('a<span class=\"bar\">foo</span>b', 'a<span", "def test_list2(self): self.backcvt(\"abc\\n*a\\n* b\\ndef\", \"abc\\n*a\\n* b\\ndef\") def test_list3(self): self.backcvt(\"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\", \"abc\\n*a\\n*#", "T1 ===\\nxyz\\n\", \"\\n=== T1 ===\\n\\nxyz\\n\") def test_title3(self): self.backcvt(\"==== T1 ====\\nxyz\\n\", \"\\n==== T1 ====\\n\\nxyz\\n\")", "def test_table2(self): self.backcvt('{| class=\"x\"\\n|}', '\\n{| class=\"x\"\\n\\n|}\\n') def test_tablecaption1(self): self.backcvt(\"{|\\n|+\\ncapt\\n|}\", \"\\n{| \\n\\n|+ \\n\\ncapt\\n\\n|}\\n\") def", "self.backcvt(\"{{{a|}}}\", \"{{{a|}}}\") def test_parserfn1(self): self.backcvt(\"{{#expr: 1 + 2}}\", \"{{#expr: 1 + 2}}\") def", "\"[[foo|bar]]\") def test_link3(self): self.backcvt(\"a [[foo]]s bar\", \"a [[foo]]s bar\") def test_template1(self): self.backcvt(\"{{foo|a|b|c=4|{{{arg}}}}}\", \"{{foo|a|b|c=4|{{{arg}}}}}\")", "bar\\nxyz\\n\", \"foo bar\\nxyz\\n\") def test_basic3(self): self.backcvt(\"&amp;amp;\", \"&amp;amp;\") def test_basic4(self): 
self.backcvt(\"{{\", \"{{\") def test_title1(self):", "def test_basic1(self): self.backcvt(\"\", \"\") def test_basic2(self): self.backcvt(\"foo bar\\nxyz\\n\", \"foo bar\\nxyz\\n\") def test_basic3(self): self.backcvt(\"&amp;amp;\",", "str) assert isinstance(text, str) ctx = Wtp() ctx.analyze_templates() ctx.start_page(title) root = ctx.parse(text, **kwargs)", "def test_list1(self): self.backcvt(\"*a\\n* b\\n\", \"*a\\n* b\\n\") def test_list2(self): self.backcvt(\"abc\\n*a\\n* b\\ndef\", \"abc\\n*a\\n* b\\ndef\") def", "\"&amp;amp;\") def test_basic4(self): self.backcvt(\"{{\", \"{{\") def test_title1(self): self.backcvt(\"== T1 ==\\nxyz\\n\", \"\\n== T1 ==\\n\\nxyz\\n\")", "self.backcvt(\"{{foo}}\", \"{{foo}}\") def test_template3(self): self.backcvt(\"{{!}}\", \"{{!}}\") def test_templatearg1(self): self.backcvt(\"{{{1}}}\", \"{{{1}}}\") def test_templatearg1(self): self.backcvt(\"{{{{{templ}}}}}\",", "test_template2(self): self.backcvt(\"{{foo}}\", \"{{foo}}\") def test_template3(self): self.backcvt(\"{{!}}\", \"{{!}}\") def test_templatearg1(self): self.backcvt(\"{{{1}}}\", \"{{{1}}}\") def test_templatearg1(self):", "self.backcvt(\"{{ROOTPAGENAME}}\", \"{{ROOTPAGENAME:}}\") def test_url1(self): self.backcvt(\"[https://wikipedia.org]\", \"[https://wikipedia.org]\") def test_url2(self): self.backcvt(\"https://wikipedia.org/\", \"[https://wikipedia.org/]\") def test_url3(self): self.backcvt(\"https://wikipedia.org/x/y?a=7%255\",", "self.totext(\"\", \"\") def test_text2(self): self.totext(\"\\nfoo bar \", \"foo bar\") def test_text3(self): self.totext(\"<b>foo</b>\", \"foo\")", "expected) def test_basic1(self): self.backcvt(\"\", \"\") def test_basic2(self): self.backcvt(\"foo bar\\nxyz\\n\", \"foo bar\\nxyz\\n\") def test_basic3(self):", "See file LICENSE and https://ylonen.org import unittest from wikitextprocessor import Wtp from wikitextprocessor.parser", "test_list3(self): self.backcvt(\"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\", \"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\") def test_list4(self): self.backcvt(\"abc\\n*a\\n**b\\n*:c\\n\", \"abc\\n*a\\n**b\\n*:c\\n\")", "self.backcvt(\"{{\", \"{{\") def test_title1(self): self.backcvt(\"== T1 ==\\nxyz\\n\", \"\\n== T1 ==\\n\\nxyz\\n\") def test_title2(self): self.backcvt(\"===", "self.backcvt(\"{{!}}\", \"{{!}}\") def test_templatearg1(self): self.backcvt(\"{{{1}}}\", \"{{{1}}}\") def test_templatearg1(self): self.backcvt(\"{{{{{templ}}}}}\", \"{{{{{templ}}}}}\") def test_templatearg2(self): self.backcvt(\"{{{a|def}}}\",", "\\n\\n|- a=\"1\"\\n\\n! cell\\n\\n\\n|}\\n') def test_magicword1(self): self.backcvt(\"a\\n__TOC__\\nb\", \"a\\n\\n__TOC__\\n\\nb\") def test_html1(self): self.backcvt(\"a<b>foo</b>b\", \"a<b>foo</b>b\") def test_html1(self):", "'\\n{| \\n\\n|- a=\"1\"\\n\\n! 
cell\\n\\n\\n|}\\n') def test_magicword1(self): self.backcvt(\"a\\n__TOC__\\nb\", \"a\\n\\n__TOC__\\n\\nb\") def test_html1(self): self.backcvt(\"a<b>foo</b>b\", \"a<b>foo</b>b\") def", "b\\ndef\") def test_list3(self): self.backcvt(\"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\", \"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\") def test_list4(self):", "class=\"bar\">foo</span>b') def test_italic1(self): self.backcvt(\"''i''\", \"''i''\") def test_bold1(self): self.backcvt(\"''b''\", \"''b''\") def test_text1(self): self.totext(\"\", \"\")", "\"''i''\") def test_bold1(self): self.backcvt(\"''b''\", \"''b''\") def test_text1(self): self.totext(\"\", \"\") def test_text2(self): self.totext(\"\\nfoo bar", "= Wtp() ctx.analyze_templates() ctx.start_page(title) root = ctx.parse(text, **kwargs) print(\"parse_with_ctx: root\", type(root), root) return", "root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_html(root) self.assertEqual(t,", "test_title1(self): self.backcvt(\"== T1 ==\\nxyz\\n\", \"\\n== T1 ==\\n\\nxyz\\n\") def test_title2(self): self.backcvt(\"=== T1 ===\\nxyz\\n\", \"\\n===", "self.backcvt('{| class=\"x\"\\n|}', '\\n{| class=\"x\"\\n\\n|}\\n') def test_tablecaption1(self): self.backcvt(\"{|\\n|+\\ncapt\\n|}\", \"\\n{| \\n\\n|+ \\n\\ncapt\\n\\n|}\\n\") def test_tablerowcell1(self): self.backcvt(\"{|\\n|-", "test_table2(self): self.backcvt('{| class=\"x\"\\n|}', '\\n{| class=\"x\"\\n\\n|}\\n') def test_tablecaption1(self): self.backcvt(\"{|\\n|+\\ncapt\\n|}\", \"\\n{| \\n\\n|+ \\n\\ncapt\\n\\n|}\\n\") def test_tablerowcell1(self):", "str) ctx = Wtp() ctx.analyze_templates() ctx.start_page(title) root = ctx.parse(text, **kwargs) print(\"parse_with_ctx: root\", type(root),", "[]) t = ctx.node_to_html(root) self.assertEqual(t, expected) def totext(self, text, expected): root, ctx =", "text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t =", "ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_wikitext(root) self.assertEqual(t, expected)", "+ 2}}\", \"{{#expr: 1 + 2}}\") def test_parserfn2(self): self.backcvt(\"{{#expr:1+{{v}}}}\", \"{{#expr:1+{{v}}}}\") def test_parserfn3(self): self.backcvt(\"{{ROOTPAGENAME}}\",", "\"{{!}}\") def test_templatearg1(self): self.backcvt(\"{{{1}}}\", \"{{{1}}}\") def test_templatearg1(self): self.backcvt(\"{{{{{templ}}}}}\", \"{{{{{templ}}}}}\") def test_templatearg2(self): self.backcvt(\"{{{a|def}}}\", \"{{{a|def}}}\")", "\", \"foo bar\") def test_text3(self): self.totext(\"<b>foo</b>\", \"foo\") def test_text4(self): self.totext(\"<h1>foo</h1><p>bar</p>\", \"foo\\n\\nbar\") def test_text5(self):", "\"foo bar\") def test_text3(self): self.totext(\"<b>foo</b>\", \"foo\") def test_text4(self): self.totext(\"<h1>foo</h1><p>bar</p>\", \"foo\\n\\nbar\") def test_text5(self): self.totext(\"foo<ref", "= ctx.node_to_html(root) self.assertEqual(t, expected) def totext(self, text, expected): root, ctx = parse_with_ctx(\"test\", text)", "class=\"x\"\\n\\n|}\\n') def test_tablecaption1(self): self.backcvt(\"{|\\n|+\\ncapt\\n|}\", \"\\n{| \\n\\n|+ \\n\\ncapt\\n\\n|}\\n\") def test_tablerowcell1(self): self.backcvt(\"{|\\n|- a=1\\n| cell\\n|}\", '\\n{|", "parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_html(root) self.assertEqual(t, expected) def totext(self,", "assert isinstance(ctx, Wtp) return root 
class NodeExpTests(unittest.TestCase): def backcvt(self, text, expected): root, ctx", "self.backcvt(\"a\\n__TOC__\\nb\", \"a\\n\\n__TOC__\\n\\nb\") def test_html1(self): self.backcvt(\"a<b>foo</b>b\", \"a<b>foo</b>b\") def test_html1(self): self.backcvt('a<span class=\"bar\">foo</span>b', 'a<span class=\"bar\">foo</span>b') def", "'\\n{| \\n\\n|- a=\"1\"\\n\\n| cell\\n\\n\\n|}\\n') def test_tablerowhdr1(self): self.backcvt(\"{|\\n|- a=1\\n! cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n! cell\\n\\n\\n|}\\n')", "self.backcvt(\"''i''\", \"''i''\") def test_bold1(self): self.backcvt(\"''b''\", \"''b''\") def test_text1(self): self.totext(\"\", \"\") def test_text2(self): self.totext(\"\\nfoo", "cell\\n\\n\\n|}\\n') def test_magicword1(self): self.backcvt(\"a\\n__TOC__\\nb\", \"a\\n\\n__TOC__\\n\\nb\") def test_html1(self): self.backcvt(\"a<b>foo</b>b\", \"a<b>foo</b>b\") def test_html1(self): self.backcvt('a<span class=\"bar\">foo</span>b',", "\"{{\") def test_title1(self): self.backcvt(\"== T1 ==\\nxyz\\n\", \"\\n== T1 ==\\n\\nxyz\\n\") def test_title2(self): self.backcvt(\"=== T1", "1 + 2}}\") def test_parserfn2(self): self.backcvt(\"{{#expr:1+{{v}}}}\", \"{{#expr:1+{{v}}}}\") def test_parserfn3(self): self.backcvt(\"{{ROOTPAGENAME}}\", \"{{ROOTPAGENAME:}}\") def test_url1(self):", "= ctx.node_to_text(root) self.assertEqual(t, expected) def test_basic1(self): self.backcvt(\"\", \"\") def test_basic2(self): self.backcvt(\"foo bar\\nxyz\\n\", \"foo", "print(\"parse_with_ctx: root\", type(root), root) return root, ctx def parse(title, text, **kwargs): root, ctx", "text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_text(root) self.assertEqual(t, expected) def test_basic1(self): self.backcvt(\"\",", "return root class NodeExpTests(unittest.TestCase): def backcvt(self, text, expected): root, ctx = parse_with_ctx(\"test\", text)", "file LICENSE and https://ylonen.org import unittest from wikitextprocessor import Wtp from wikitextprocessor.parser import", "[]) self.assertEqual(ctx.warnings, []) t = ctx.node_to_wikitext(root) self.assertEqual(t, expected) def tohtml(self, text, expected): root,", "\"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\") def test_list4(self): self.backcvt(\"abc\\n*a\\n**b\\n*:c\\n\", \"abc\\n*a\\n**b\\n*:c\\n\") def test_pre1(self): self.backcvt(\"a<pre>foo\\n bar</pre>b\", \"a<pre>foo\\n", "ctx.analyze_templates() ctx.start_page(title) root = ctx.parse(text, **kwargs) print(\"parse_with_ctx: root\", type(root), root) return root, ctx", "self.backcvt(\"{{{{{templ}}}}}\", \"{{{{{templ}}}}}\") def test_templatearg2(self): self.backcvt(\"{{{a|def}}}\", \"{{{a|def}}}\") def test_templatearg3(self): self.backcvt(\"{{{a|}}}\", \"{{{a|}}}\") def test_parserfn1(self): self.backcvt(\"{{#expr:", "# Copyright (c) 2020-2021 <NAME>. 
See file LICENSE and https://ylonen.org import unittest from", "root) return root, ctx def parse(title, text, **kwargs): root, ctx = parse_with_ctx(title, text,", "\"{{foo}}\") def test_template3(self): self.backcvt(\"{{!}}\", \"{{!}}\") def test_templatearg1(self): self.backcvt(\"{{{1}}}\", \"{{{1}}}\") def test_templatearg1(self): self.backcvt(\"{{{{{templ}}}}}\", \"{{{{{templ}}}}}\")", "= parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_text(root) self.assertEqual(t, expected) def", "def test_html1(self): self.backcvt(\"a<b>foo</b>b\", \"a<b>foo</b>b\") def test_html1(self): self.backcvt('a<span class=\"bar\">foo</span>b', 'a<span class=\"bar\">foo</span>b') def test_italic1(self): self.backcvt(\"''i''\",", "def totext(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, [])", "test_title5(self): self.backcvt(\"====== T1 ======\\nxyz\\n\", \"\\n====== T1 ======\\n\\nxyz\\n\") def test_hline1(self): self.backcvt(\"aaa\\n----\\nbbbb\", \"aaa\\n\\n----\\n\\nbbbb\") def test_list1(self):", "test_title3(self): self.backcvt(\"==== T1 ====\\nxyz\\n\", \"\\n==== T1 ====\\n\\nxyz\\n\") def test_title4(self): self.backcvt(\"===== T1 =====\\nxyz\\n\", \"\\n=====", "Wtp) return root class NodeExpTests(unittest.TestCase): def backcvt(self, text, expected): root, ctx = parse_with_ctx(\"test\",", "b\\ndef\", \"abc\\n*a\\n* b\\ndef\") def test_list3(self): self.backcvt(\"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\", \"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\")", "ctx.start_page(title) root = ctx.parse(text, **kwargs) print(\"parse_with_ctx: root\", type(root), root) return root, ctx def", "for WikiText parsing # # Copyright (c) 2020-2021 <NAME>. 
See file LICENSE and", "NodeKind, WikiNode) def parse_with_ctx(title, text, **kwargs): assert isinstance(title, str) assert isinstance(text, str) ctx", "def test_link3(self): self.backcvt(\"a [[foo]]s bar\", \"a [[foo]]s bar\") def test_template1(self): self.backcvt(\"{{foo|a|b|c=4|{{{arg}}}}}\", \"{{foo|a|b|c=4|{{{arg}}}}}\") def", "def test_template1(self): self.backcvt(\"{{foo|a|b|c=4|{{{arg}}}}}\", \"{{foo|a|b|c=4|{{{arg}}}}}\") def test_template2(self): self.backcvt(\"{{foo}}\", \"{{foo}}\") def test_template3(self): self.backcvt(\"{{!}}\", \"{{!}}\") def", "isinstance(title, str) assert isinstance(text, str) ctx = Wtp() ctx.analyze_templates() ctx.start_page(title) root = ctx.parse(text,", "T1 ==\\nxyz\\n\", \"\\n== T1 ==\\n\\nxyz\\n\") def test_title2(self): self.backcvt(\"=== T1 ===\\nxyz\\n\", \"\\n=== T1 ===\\n\\nxyz\\n\")", "LICENSE and https://ylonen.org import unittest from wikitextprocessor import Wtp from wikitextprocessor.parser import (print_tree,", "self.backcvt(\"[[foo bar]]\", \"[[foo bar]]\") def test_link2(self): self.backcvt(\"[[foo|bar]]\", \"[[foo|bar]]\") def test_link3(self): self.backcvt(\"a [[foo]]s bar\",", "= parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_html(root) self.assertEqual(t, expected) def", "self.backcvt(\"\", \"\") def test_basic2(self): self.backcvt(\"foo bar\\nxyz\\n\", \"foo bar\\nxyz\\n\") def test_basic3(self): self.backcvt(\"&amp;amp;\", \"&amp;amp;\") def", "self.backcvt(\"{{#expr:1+{{v}}}}\", \"{{#expr:1+{{v}}}}\") def test_parserfn3(self): self.backcvt(\"{{ROOTPAGENAME}}\", \"{{ROOTPAGENAME:}}\") def test_url1(self): self.backcvt(\"[https://wikipedia.org]\", \"[https://wikipedia.org]\") def test_url2(self): self.backcvt(\"https://wikipedia.org/\",", "\"[https://wikipedia.org/]\") def test_url3(self): self.backcvt(\"https://wikipedia.org/x/y?a=7%255\", \"[https://wikipedia.org/x/y?a=7%255]\") def test_table1(self): self.backcvt(\"{| |}\", \"\\n{| \\n\\n|}\\n\") def test_table2(self):", "root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_text(root) self.assertEqual(t,", "====\\nxyz\\n\", \"\\n==== T1 ====\\n\\nxyz\\n\") def test_title4(self): self.backcvt(\"===== T1 =====\\nxyz\\n\", \"\\n===== T1 =====\\n\\nxyz\\n\") def", "self.backcvt(\"[[foo|bar]]\", \"[[foo|bar]]\") def test_link3(self): self.backcvt(\"a [[foo]]s bar\", \"a [[foo]]s bar\") def test_template1(self): self.backcvt(\"{{foo|a|b|c=4|{{{arg}}}}}\",", "WikiNode) assert isinstance(ctx, Wtp) return root class NodeExpTests(unittest.TestCase): def backcvt(self, text, expected): root,", "self.backcvt(\"a<pre>foo\\n bar</pre>b\", \"a<pre>foo\\n bar</pre>b\") def test_preformatted1(self): self.backcvt(\" a\\n b\", \" a\\n b\") def", "def parse_with_ctx(title, text, **kwargs): assert isinstance(title, str) assert isinstance(text, str) ctx = Wtp()", "self.assertEqual(t, expected) def totext(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, [])", "T1 ====\\nxyz\\n\", \"\\n==== T1 ====\\n\\nxyz\\n\") def test_title4(self): self.backcvt(\"===== T1 =====\\nxyz\\n\", \"\\n===== T1 =====\\n\\nxyz\\n\")", "d\\n* b\\ndef\", \"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\") def test_list4(self): self.backcvt(\"abc\\n*a\\n**b\\n*:c\\n\", \"abc\\n*a\\n**b\\n*:c\\n\") def test_pre1(self): self.backcvt(\"a<pre>foo\\n", "def test_title4(self): self.backcvt(\"===== T1 =====\\nxyz\\n\", \"\\n===== T1 =====\\n\\nxyz\\n\") def 
test_title5(self): self.backcvt(\"====== T1 ======\\nxyz\\n\",", "def test_pre1(self): self.backcvt(\"a<pre>foo\\n bar</pre>b\", \"a<pre>foo\\n bar</pre>b\") def test_preformatted1(self): self.backcvt(\" a\\n b\", \" a\\n", "def test_tablerowhdr1(self): self.backcvt(\"{|\\n|- a=1\\n! cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n! cell\\n\\n\\n|}\\n') def test_magicword1(self): self.backcvt(\"a\\n__TOC__\\nb\", \"a\\n\\n__TOC__\\n\\nb\")", "\"''b''\") def test_text1(self): self.totext(\"\", \"\") def test_text2(self): self.totext(\"\\nfoo bar \", \"foo bar\") def", "test_pre1(self): self.backcvt(\"a<pre>foo\\n bar</pre>b\", \"a<pre>foo\\n bar</pre>b\") def test_preformatted1(self): self.backcvt(\" a\\n b\", \" a\\n b\")", "\"{{{{{templ}}}}}\") def test_templatearg2(self): self.backcvt(\"{{{a|def}}}\", \"{{{a|def}}}\") def test_templatearg3(self): self.backcvt(\"{{{a|}}}\", \"{{{a|}}}\") def test_parserfn1(self): self.backcvt(\"{{#expr: 1", "parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_text(root) self.assertEqual(t, expected) def test_basic1(self):", "**kwargs): root, ctx = parse_with_ctx(title, text, **kwargs) assert isinstance(root, WikiNode) assert isinstance(ctx, Wtp)", "test_parserfn1(self): self.backcvt(\"{{#expr: 1 + 2}}\", \"{{#expr: 1 + 2}}\") def test_parserfn2(self): self.backcvt(\"{{#expr:1+{{v}}}}\", \"{{#expr:1+{{v}}}}\")", "\"[https://wikipedia.org/x/y?a=7%255]\") def test_table1(self): self.backcvt(\"{| |}\", \"\\n{| \\n\\n|}\\n\") def test_table2(self): self.backcvt('{| class=\"x\"\\n|}', '\\n{| class=\"x\"\\n\\n|}\\n')", "self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_wikitext(root) self.assertEqual(t, expected) def tohtml(self, text, expected):", "\"foo bar\\nxyz\\n\") def test_basic3(self): self.backcvt(\"&amp;amp;\", \"&amp;amp;\") def test_basic4(self): self.backcvt(\"{{\", \"{{\") def test_title1(self): self.backcvt(\"==", "def parse(title, text, **kwargs): root, ctx = parse_with_ctx(title, text, **kwargs) assert isinstance(root, WikiNode)", "\"\\n====== T1 ======\\n\\nxyz\\n\") def test_hline1(self): self.backcvt(\"aaa\\n----\\nbbbb\", \"aaa\\n\\n----\\n\\nbbbb\") def test_list1(self): self.backcvt(\"*a\\n* b\\n\", \"*a\\n* b\\n\")", "T1 ===\\n\\nxyz\\n\") def test_title3(self): self.backcvt(\"==== T1 ====\\nxyz\\n\", \"\\n==== T1 ====\\n\\nxyz\\n\") def test_title4(self): self.backcvt(\"=====", "test_title2(self): self.backcvt(\"=== T1 ===\\nxyz\\n\", \"\\n=== T1 ===\\n\\nxyz\\n\") def test_title3(self): self.backcvt(\"==== T1 ====\\nxyz\\n\", \"\\n====", "\"{{{a|def}}}\") def test_templatearg3(self): self.backcvt(\"{{{a|}}}\", \"{{{a|}}}\") def test_parserfn1(self): self.backcvt(\"{{#expr: 1 + 2}}\", \"{{#expr: 1", "2}}\", \"{{#expr: 1 + 2}}\") def test_parserfn2(self): self.backcvt(\"{{#expr:1+{{v}}}}\", \"{{#expr:1+{{v}}}}\") def test_parserfn3(self): self.backcvt(\"{{ROOTPAGENAME}}\", \"{{ROOTPAGENAME:}}\")", "test_tablerowcell1(self): self.backcvt(\"{|\\n|- a=1\\n| cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n| cell\\n\\n\\n|}\\n') def test_tablerowhdr1(self): self.backcvt(\"{|\\n|- a=1\\n! 
cell\\n|}\",", "self.backcvt('a<span class=\"bar\">foo</span>b', 'a<span class=\"bar\">foo</span>b') def test_italic1(self): self.backcvt(\"''i''\", \"''i''\") def test_bold1(self): self.backcvt(\"''b''\", \"''b''\") def", "===\\n\\nxyz\\n\") def test_title3(self): self.backcvt(\"==== T1 ====\\nxyz\\n\", \"\\n==== T1 ====\\n\\nxyz\\n\") def test_title4(self): self.backcvt(\"===== T1", "\"{{{1}}}\") def test_templatearg1(self): self.backcvt(\"{{{{{templ}}}}}\", \"{{{{{templ}}}}}\") def test_templatearg2(self): self.backcvt(\"{{{a|def}}}\", \"{{{a|def}}}\") def test_templatearg3(self): self.backcvt(\"{{{a|}}}\", \"{{{a|}}}\")", "expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_text(root)", "cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n! cell\\n\\n\\n|}\\n') def test_magicword1(self): self.backcvt(\"a\\n__TOC__\\nb\", \"a\\n\\n__TOC__\\n\\nb\") def test_html1(self): self.backcvt(\"a<b>foo</b>b\", \"a<b>foo</b>b\")", "test_templatearg1(self): self.backcvt(\"{{{{{templ}}}}}\", \"{{{{{templ}}}}}\") def test_templatearg2(self): self.backcvt(\"{{{a|def}}}\", \"{{{a|def}}}\") def test_templatearg3(self): self.backcvt(\"{{{a|}}}\", \"{{{a|}}}\") def test_parserfn1(self):", "self.backcvt(\"=== T1 ===\\nxyz\\n\", \"\\n=== T1 ===\\n\\nxyz\\n\") def test_title3(self): self.backcvt(\"==== T1 ====\\nxyz\\n\", \"\\n==== T1", "\"aaa\\n\\n----\\n\\nbbbb\") def test_list1(self): self.backcvt(\"*a\\n* b\\n\", \"*a\\n* b\\n\") def test_list2(self): self.backcvt(\"abc\\n*a\\n* b\\ndef\", \"abc\\n*a\\n* b\\ndef\")", "a=1\\n| cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n| cell\\n\\n\\n|}\\n') def test_tablerowhdr1(self): self.backcvt(\"{|\\n|- a=1\\n! cell\\n|}\", '\\n{| \\n\\n|-", "root, ctx def parse(title, text, **kwargs): root, ctx = parse_with_ctx(title, text, **kwargs) assert", "def test_link1(self): self.backcvt(\"[[foo bar]]\", \"[[foo bar]]\") def test_link2(self): self.backcvt(\"[[foo|bar]]\", \"[[foo|bar]]\") def test_link3(self): self.backcvt(\"a", "class=\"x\"\\n|}', '\\n{| class=\"x\"\\n\\n|}\\n') def test_tablecaption1(self): self.backcvt(\"{|\\n|+\\ncapt\\n|}\", \"\\n{| \\n\\n|+ \\n\\ncapt\\n\\n|}\\n\") def test_tablerowcell1(self): self.backcvt(\"{|\\n|- a=1\\n|", "self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_html(root) self.assertEqual(t, expected) def totext(self, text, expected):", "= ctx.node_to_wikitext(root) self.assertEqual(t, expected) def tohtml(self, text, expected): root, ctx = parse_with_ctx(\"test\", text)", "\"abc\\n*a\\n* b\\ndef\") def test_list3(self): self.backcvt(\"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\", \"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\") def", "Copyright (c) 2020-2021 <NAME>. 
See file LICENSE and https://ylonen.org import unittest from wikitextprocessor", "text, **kwargs): root, ctx = parse_with_ctx(title, text, **kwargs) assert isinstance(root, WikiNode) assert isinstance(ctx,", "self.backcvt(\"https://wikipedia.org/\", \"[https://wikipedia.org/]\") def test_url3(self): self.backcvt(\"https://wikipedia.org/x/y?a=7%255\", \"[https://wikipedia.org/x/y?a=7%255]\") def test_table1(self): self.backcvt(\"{| |}\", \"\\n{| \\n\\n|}\\n\") def", "def test_parserfn1(self): self.backcvt(\"{{#expr: 1 + 2}}\", \"{{#expr: 1 + 2}}\") def test_parserfn2(self): self.backcvt(\"{{#expr:1+{{v}}}}\",", "\"a<b>foo</b>b\") def test_html1(self): self.backcvt('a<span class=\"bar\">foo</span>b', 'a<span class=\"bar\">foo</span>b') def test_italic1(self): self.backcvt(\"''i''\", \"''i''\") def test_bold1(self):", "def test_list3(self): self.backcvt(\"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\", \"abc\\n*a\\n*# c\\n*# d\\n* b\\ndef\") def test_list4(self): self.backcvt(\"abc\\n*a\\n**b\\n*:c\\n\",", "text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_html(root) self.assertEqual(t, expected) def totext(self, text,", "======\\n\\nxyz\\n\") def test_hline1(self): self.backcvt(\"aaa\\n----\\nbbbb\", \"aaa\\n\\n----\\n\\nbbbb\") def test_list1(self): self.backcvt(\"*a\\n* b\\n\", \"*a\\n* b\\n\") def test_list2(self):", "\"\\n=== T1 ===\\n\\nxyz\\n\") def test_title3(self): self.backcvt(\"==== T1 ====\\nxyz\\n\", \"\\n==== T1 ====\\n\\nxyz\\n\") def test_title4(self):", "**kwargs) print(\"parse_with_ctx: root\", type(root), root) return root, ctx def parse(title, text, **kwargs): root,", "def test_table1(self): self.backcvt(\"{| |}\", \"\\n{| \\n\\n|}\\n\") def test_table2(self): self.backcvt('{| class=\"x\"\\n|}', '\\n{| class=\"x\"\\n\\n|}\\n') def", "self.assertEqual(ctx.warnings, []) t = ctx.node_to_html(root) self.assertEqual(t, expected) def totext(self, text, expected): root, ctx", "WikiText parsing # # Copyright (c) 2020-2021 <NAME>. See file LICENSE and https://ylonen.org", "tohtml(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t", "self.backcvt(\"''b''\", \"''b''\") def test_text1(self): self.totext(\"\", \"\") def test_text2(self): self.totext(\"\\nfoo bar \", \"foo bar\")", "\\n\\n|}\\n\") def test_table2(self): self.backcvt('{| class=\"x\"\\n|}', '\\n{| class=\"x\"\\n\\n|}\\n') def test_tablecaption1(self): self.backcvt(\"{|\\n|+\\ncapt\\n|}\", \"\\n{| \\n\\n|+ \\n\\ncapt\\n\\n|}\\n\")", "wikitextprocessor.parser import (print_tree, NodeKind, WikiNode) def parse_with_ctx(title, text, **kwargs): assert isinstance(title, str) assert", "<NAME>. 
See file LICENSE and https://ylonen.org import unittest from wikitextprocessor import Wtp from", "text, **kwargs) assert isinstance(root, WikiNode) assert isinstance(ctx, Wtp) return root class NodeExpTests(unittest.TestCase): def", "root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_wikitext(root) self.assertEqual(t,", "test_basic4(self): self.backcvt(\"{{\", \"{{\") def test_title1(self): self.backcvt(\"== T1 ==\\nxyz\\n\", \"\\n== T1 ==\\n\\nxyz\\n\") def test_title2(self):", "self.backcvt(\"== T1 ==\\nxyz\\n\", \"\\n== T1 ==\\n\\nxyz\\n\") def test_title2(self): self.backcvt(\"=== T1 ===\\nxyz\\n\", \"\\n=== T1", "class=\"bar\">foo</span>b', 'a<span class=\"bar\">foo</span>b') def test_italic1(self): self.backcvt(\"''i''\", \"''i''\") def test_bold1(self): self.backcvt(\"''b''\", \"''b''\") def test_text1(self):", "test_template3(self): self.backcvt(\"{{!}}\", \"{{!}}\") def test_templatearg1(self): self.backcvt(\"{{{1}}}\", \"{{{1}}}\") def test_templatearg1(self): self.backcvt(\"{{{{{templ}}}}}\", \"{{{{{templ}}}}}\") def test_templatearg2(self):", "\"\\n== T1 ==\\n\\nxyz\\n\") def test_title2(self): self.backcvt(\"=== T1 ===\\nxyz\\n\", \"\\n=== T1 ===\\n\\nxyz\\n\") def test_title3(self):", "test_list1(self): self.backcvt(\"*a\\n* b\\n\", \"*a\\n* b\\n\") def test_list2(self): self.backcvt(\"abc\\n*a\\n* b\\ndef\", \"abc\\n*a\\n* b\\ndef\") def test_list3(self):", "self.backcvt(\"foo bar\\nxyz\\n\", \"foo bar\\nxyz\\n\") def test_basic3(self): self.backcvt(\"&amp;amp;\", \"&amp;amp;\") def test_basic4(self): self.backcvt(\"{{\", \"{{\") def", "expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_html(root)", "def test_text1(self): self.totext(\"\", \"\") def test_text2(self): self.totext(\"\\nfoo bar \", \"foo bar\") def test_text3(self):", "expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_wikitext(root)", "t = ctx.node_to_text(root) self.assertEqual(t, expected) def test_basic1(self): self.backcvt(\"\", \"\") def test_basic2(self): self.backcvt(\"foo bar\\nxyz\\n\",", "T1 ==\\n\\nxyz\\n\") def test_title2(self): self.backcvt(\"=== T1 ===\\nxyz\\n\", \"\\n=== T1 ===\\n\\nxyz\\n\") def test_title3(self): self.backcvt(\"====", "2020-2021 <NAME>. 
See file LICENSE and https://ylonen.org import unittest from wikitextprocessor import Wtp", "from wikitextprocessor.parser import (print_tree, NodeKind, WikiNode) def parse_with_ctx(title, text, **kwargs): assert isinstance(title, str)", "def test_tablerowcell1(self): self.backcvt(\"{|\\n|- a=1\\n| cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n| cell\\n\\n\\n|}\\n') def test_tablerowhdr1(self): self.backcvt(\"{|\\n|- a=1\\n!", "text, **kwargs): assert isinstance(title, str) assert isinstance(text, str) ctx = Wtp() ctx.analyze_templates() ctx.start_page(title)", "self.backcvt(\"{|\\n|+\\ncapt\\n|}\", \"\\n{| \\n\\n|+ \\n\\ncapt\\n\\n|}\\n\") def test_tablerowcell1(self): self.backcvt(\"{|\\n|- a=1\\n| cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n| cell\\n\\n\\n|}\\n')", "https://ylonen.org import unittest from wikitextprocessor import Wtp from wikitextprocessor.parser import (print_tree, NodeKind, WikiNode)", "test_url2(self): self.backcvt(\"https://wikipedia.org/\", \"[https://wikipedia.org/]\") def test_url3(self): self.backcvt(\"https://wikipedia.org/x/y?a=7%255\", \"[https://wikipedia.org/x/y?a=7%255]\") def test_table1(self): self.backcvt(\"{| |}\", \"\\n{| \\n\\n|}\\n\")", "self.backcvt(\"[https://wikipedia.org]\", \"[https://wikipedia.org]\") def test_url2(self): self.backcvt(\"https://wikipedia.org/\", \"[https://wikipedia.org/]\") def test_url3(self): self.backcvt(\"https://wikipedia.org/x/y?a=7%255\", \"[https://wikipedia.org/x/y?a=7%255]\") def test_table1(self): self.backcvt(\"{|", "isinstance(root, WikiNode) assert isinstance(ctx, Wtp) return root class NodeExpTests(unittest.TestCase): def backcvt(self, text, expected):", "test_bold1(self): self.backcvt(\"''b''\", \"''b''\") def test_text1(self): self.totext(\"\", \"\") def test_text2(self): self.totext(\"\\nfoo bar \", \"foo", "isinstance(text, str) ctx = Wtp() ctx.analyze_templates() ctx.start_page(title) root = ctx.parse(text, **kwargs) print(\"parse_with_ctx: root\",", "test_text1(self): self.totext(\"\", \"\") def test_text2(self): self.totext(\"\\nfoo bar \", \"foo bar\") def test_text3(self): self.totext(\"<b>foo</b>\",", "\"\\n===== T1 =====\\n\\nxyz\\n\") def test_title5(self): self.backcvt(\"====== T1 ======\\nxyz\\n\", \"\\n====== T1 ======\\n\\nxyz\\n\") def test_hline1(self):", "==\\nxyz\\n\", \"\\n== T1 ==\\n\\nxyz\\n\") def test_title2(self): self.backcvt(\"=== T1 ===\\nxyz\\n\", \"\\n=== T1 ===\\n\\nxyz\\n\") def", "import unittest from wikitextprocessor import Wtp from wikitextprocessor.parser import (print_tree, NodeKind, WikiNode) def", "test_templatearg3(self): self.backcvt(\"{{{a|}}}\", \"{{{a|}}}\") def test_parserfn1(self): self.backcvt(\"{{#expr: 1 + 2}}\", \"{{#expr: 1 + 2}}\")", "ctx.parse(text, **kwargs) print(\"parse_with_ctx: root\", type(root), root) return root, ctx def parse(title, text, **kwargs):", "unittest from wikitextprocessor import Wtp from wikitextprocessor.parser import (print_tree, NodeKind, WikiNode) def parse_with_ctx(title,", "parse(title, text, **kwargs): root, ctx = parse_with_ctx(title, text, **kwargs) assert isinstance(root, WikiNode) assert", "+ 2}}\") def test_parserfn2(self): self.backcvt(\"{{#expr:1+{{v}}}}\", \"{{#expr:1+{{v}}}}\") def test_parserfn3(self): self.backcvt(\"{{ROOTPAGENAME}}\", \"{{ROOTPAGENAME:}}\") def test_url1(self): self.backcvt(\"[https://wikipedia.org]\",", "\"[[foo bar]]\") def test_link2(self): self.backcvt(\"[[foo|bar]]\", \"[[foo|bar]]\") def test_link3(self): self.backcvt(\"a [[foo]]s bar\", \"a [[foo]]s", "= parse_with_ctx(\"test\", text) 
self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t = ctx.node_to_wikitext(root) self.assertEqual(t, expected) def", "def test_hline1(self): self.backcvt(\"aaa\\n----\\nbbbb\", \"aaa\\n\\n----\\n\\nbbbb\") def test_list1(self): self.backcvt(\"*a\\n* b\\n\", \"*a\\n* b\\n\") def test_list2(self): self.backcvt(\"abc\\n*a\\n*", "root = ctx.parse(text, **kwargs) print(\"parse_with_ctx: root\", type(root), root) return root, ctx def parse(title,", "self.backcvt(\"==== T1 ====\\nxyz\\n\", \"\\n==== T1 ====\\n\\nxyz\\n\") def test_title4(self): self.backcvt(\"===== T1 =====\\nxyz\\n\", \"\\n===== T1", "self.backcvt(\"abc\\n*a\\n**b\\n*:c\\n\", \"abc\\n*a\\n**b\\n*:c\\n\") def test_pre1(self): self.backcvt(\"a<pre>foo\\n bar</pre>b\", \"a<pre>foo\\n bar</pre>b\") def test_preformatted1(self): self.backcvt(\" a\\n b\",", "b\", \" a\\n b\") def test_link1(self): self.backcvt(\"[[foo bar]]\", \"[[foo bar]]\") def test_link2(self): self.backcvt(\"[[foo|bar]]\",", "def tohtml(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, [])", "backcvt(self, text, expected): root, ctx = parse_with_ctx(\"test\", text) self.assertEqual(ctx.errors, []) self.assertEqual(ctx.warnings, []) t", "self.backcvt(\"{|\\n|- a=1\\n| cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n| cell\\n\\n\\n|}\\n') def test_tablerowhdr1(self): self.backcvt(\"{|\\n|- a=1\\n! cell\\n|}\", '\\n{|", "self.assertEqual(t, expected) def test_basic1(self): self.backcvt(\"\", \"\") def test_basic2(self): self.backcvt(\"foo bar\\nxyz\\n\", \"foo bar\\nxyz\\n\") def", "bar]]\", \"[[foo bar]]\") def test_link2(self): self.backcvt(\"[[foo|bar]]\", \"[[foo|bar]]\") def test_link3(self): self.backcvt(\"a [[foo]]s bar\", \"a", "test_preformatted1(self): self.backcvt(\" a\\n b\", \" a\\n b\") def test_link1(self): self.backcvt(\"[[foo bar]]\", \"[[foo bar]]\")", "a=\"1\"\\n\\n| cell\\n\\n\\n|}\\n') def test_tablerowhdr1(self): self.backcvt(\"{|\\n|- a=1\\n! cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n! cell\\n\\n\\n|}\\n') def test_magicword1(self):", "cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n| cell\\n\\n\\n|}\\n') def test_tablerowhdr1(self): self.backcvt(\"{|\\n|- a=1\\n! 
cell\\n|}\", '\\n{| \\n\\n|- a=\"1\"\\n\\n!", "parse_with_ctx(title, text, **kwargs) assert isinstance(root, WikiNode) assert isinstance(ctx, Wtp) return root class NodeExpTests(unittest.TestCase):", "isinstance(ctx, Wtp) return root class NodeExpTests(unittest.TestCase): def backcvt(self, text, expected): root, ctx =", "def test_preformatted1(self): self.backcvt(\" a\\n b\", \" a\\n b\") def test_link1(self): self.backcvt(\"[[foo bar]]\", \"[[foo", "assert isinstance(text, str) ctx = Wtp() ctx.analyze_templates() ctx.start_page(title) root = ctx.parse(text, **kwargs) print(\"parse_with_ctx:", "[]) self.assertEqual(ctx.warnings, []) t = ctx.node_to_text(root) self.assertEqual(t, expected) def test_basic1(self): self.backcvt(\"\", \"\") def", "def test_templatearg1(self): self.backcvt(\"{{{1}}}\", \"{{{1}}}\") def test_templatearg1(self): self.backcvt(\"{{{{{templ}}}}}\", \"{{{{{templ}}}}}\") def test_templatearg2(self): self.backcvt(\"{{{a|def}}}\", \"{{{a|def}}}\") def", "self.backcvt(\"{| |}\", \"\\n{| \\n\\n|}\\n\") def test_table2(self): self.backcvt('{| class=\"x\"\\n|}', '\\n{| class=\"x\"\\n\\n|}\\n') def test_tablecaption1(self): self.backcvt(\"{|\\n|+\\ncapt\\n|}\",", "def test_templatearg2(self): self.backcvt(\"{{{a|def}}}\", \"{{{a|def}}}\") def test_templatearg3(self): self.backcvt(\"{{{a|}}}\", \"{{{a|}}}\") def test_parserfn1(self): self.backcvt(\"{{#expr: 1 +", "'\\n{| class=\"x\"\\n\\n|}\\n') def test_tablecaption1(self): self.backcvt(\"{|\\n|+\\ncapt\\n|}\", \"\\n{| \\n\\n|+ \\n\\ncapt\\n\\n|}\\n\") def test_tablerowcell1(self): self.backcvt(\"{|\\n|- a=1\\n| cell\\n|}\",", "T1 =====\\n\\nxyz\\n\") def test_title5(self): self.backcvt(\"====== T1 ======\\nxyz\\n\", \"\\n====== T1 ======\\n\\nxyz\\n\") def test_hline1(self): self.backcvt(\"aaa\\n----\\nbbbb\",", "def test_url3(self): self.backcvt(\"https://wikipedia.org/x/y?a=7%255\", \"[https://wikipedia.org/x/y?a=7%255]\") def test_table1(self): self.backcvt(\"{| |}\", \"\\n{| \\n\\n|}\\n\") def test_table2(self): self.backcvt('{|" ]
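
# A minimal round-trip sketch, not part of the original suite: backcvt()
# above exercises parse-then-serialize, and the same API can be driven
# directly.  Only names the file already imports are used; "Example" is an
# arbitrary page title chosen for illustration.
def _roundtrip_demo():
    ctx = Wtp()
    ctx.analyze_templates()
    ctx.start_page("Example")
    root = ctx.parse("== T1 ==\nxyz\n")
    # Per test_title1 above, serialization canonicalizes the blank lines
    # around a heading, yielding "\n== T1 ==\n\nxyz\n".
    return ctx.node_to_wikitext(root)


if __name__ == "__main__":
    print(_roundtrip_demo())
    unittest.main()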
[ "len(nums2): if median[0] is not None and idx_1 + idx_2 == median[0]: median_sum", "None and idx_1 + idx_2 == median[0]: median_sum += nums1[idx_1] idx_1 += 1", "+= nums1[idx_1] idx_1 += 1 median[0] = None continue if median[1] is not", "median_sum += nums1[idx_1 + 1] break if median[1] is not None and idx_1", "False median = [None, None] if is_even: median[1] = int(total_len / 2) median[0]", "1: median_sum += nums1[idx_1] median[0] = None if idx_1 + idx_2 == median[0]:", "idx_2 == median[1]: median_sum += nums2[idx_2] break idx_2 += 1 continue if idx_2", "nums2[median[1]]) / 2.0 else: return nums2[median[0]] if len(nums2) == 0: if is_even: return", "+= 1 idx_2 += 1 elif nums1[idx_1] > nums2[idx_2]: if median[0] is not", "is not None and idx_1 + idx_2 == median[1]: median_sum += nums2[idx_2] break", "None and idx_1 + idx_2 == median[1]: median_sum += nums1[idx_1] break idx_1 +=", "= None continue if median[1] is not None and idx_1 + idx_2 ==", "nums2[idx_2] break idx_2 += 1 else: if median[0] is not None and idx_1", "return median_sum / 2.0 if is_even else median_sum s = Solution() s.findMedianSortedArrays([1, 2],", "and idx_1 + idx_2 == median[1]: median_sum += nums1[idx_1] break idx_1 += 1", "= idx_2 = 0 while True: if median[0] is None and median[1] is", "+= nums2[idx_2] break if idx_1 + idx_2 == median[1] and median[1] is not", "is not None and idx_1 + idx_2 == median[0]: median_sum += nums1[idx_1] idx_1", "idx_1 + idx_2 == median[0]: median_sum += nums2[idx_2] idx_2 += 1 median[0] =", "1 continue if nums1[idx_1] == nums2[idx_2]: if median[0] is not None and idx_1", "idx_1 + idx_2 == median[1] and median[1] is not None: median_sum += nums1[idx_1]", "nums2[idx_2]: if median[0] is not None and idx_1 + idx_2 == median[0]: median_sum", "+= 1 else: if median[0] is not None and idx_1 + idx_2 ==", "= len(nums1) + len(nums2) if total_len == 0: raise ValueError('Two arrays cannot both", "be arrays.') total_len = len(nums1) + len(nums2) if total_len == 0: raise ValueError('Two", "float \"\"\" if nums1 is None or nums2 is None: raise ValueError('Inputs should", "break if median[1] is not None and idx_1 + idx_2 == median[1]: median_sum", "if median[0] is not None and idx_1 + idx_2 == median[0] - 1:", "median[0] = None continue if median[1] is not None and idx_1 + idx_2", "= 0 while True: if median[0] is None and median[1] is None: break", "idx_1 + idx_2 == median[0]: median_sum += nums1[idx_1] if median[1] is not None:", "not None: median_sum += nums2[idx_2] break if idx_1 + idx_2 == median[1] and", "nums2: List[int] :rtype: float \"\"\" if nums1 is None or nums2 is None:", "== median[0]: median_sum += nums2[idx_2] if median[1] is not None: median_sum += nums2[idx_2", "median[0] is not None and idx_1 + idx_2 == median[0] - 1: median_sum", "# ## https://leetcode.com/problems/median-of-two-sorted-arrays/ # class Solution(object): def findMedianSortedArrays(self, nums1, nums2): \"\"\" :type nums1:", "== len(nums1): if median[0] is not None and idx_1 + idx_2 == median[0]:", "nums1[median[1]]) / 2.0 else: return nums1[median[0]] median_sum = idx_1 = idx_2 = 0", "while True: if median[0] is None and median[1] is None: break if idx_1", "return nums2[median[0]] if len(nums2) == 0: if is_even: return (nums1[median[0]] + nums1[median[1]]) /", "continue if median[1] is not None and idx_1 + idx_2 == median[1]: median_sum", "if len(nums1) == 0: if is_even: return (nums2[median[0]] + nums2[median[1]]) / 2.0 else:", "1 else: if median[0] is not None and idx_1 + idx_2 == median[0]:", "== median[0]: median_sum 
+= nums1[idx_1] if median[1] is not None: median_sum += nums1[idx_1", "+ idx_2 == median[1] and median[1] is not None: median_sum += nums1[idx_1] break", "nums1[median[0]] median_sum = idx_1 = idx_2 = 0 while True: if median[0] is", "ValueError('Two arrays cannot both be empty.') is_even = True if total_len % 2", "1 else: median[0] = int((total_len - 1) / 2) if len(nums1) == 0:", "median_sum = idx_1 = idx_2 = 0 while True: if median[0] is None", "len(nums1) + len(nums2) if total_len == 0: raise ValueError('Two arrays cannot both be", "is not None and idx_1 + idx_2 == median[0] - 1: median_sum +=", "cannot both be empty.') is_even = True if total_len % 2 == 0", "len(nums2) == 0: if is_even: return (nums1[median[0]] + nums1[median[1]]) / 2.0 else: return", "2 == 0 else False median = [None, None] if is_even: median[1] =", "/ 2.0 else: return nums1[median[0]] median_sum = idx_1 = idx_2 = 0 while", "if idx_2 == len(nums2): if median[0] is not None and idx_1 + idx_2", "1] break if median[1] is not None and idx_1 + idx_2 == median[1]:", "class Solution(object): def findMedianSortedArrays(self, nums1, nums2): \"\"\" :type nums1: List[int] :type nums2: List[int]", "== median[1]: median_sum += nums1[idx_1] break idx_1 += 1 continue if nums1[idx_1] ==", "0: raise ValueError('Two arrays cannot both be empty.') is_even = True if total_len", "and idx_1 + idx_2 == median[0] - 1: median_sum += nums1[idx_1] median[0] =", "+ idx_2 == median[0]: median_sum += nums2[idx_2] idx_2 += 1 median[0] = None", "== median[1] and median[1] is not None: median_sum += nums1[idx_1] break idx_1 +=", "None: median_sum += nums1[idx_1 + 1] break if median[1] is not None and", "+ idx_2 == median[0]: median_sum += nums1[idx_1] median[0] = None if median[1] is", "median_sum += nums1[idx_1] if median[1] is not None: median_sum += nums1[idx_1 + 1]", "None: median_sum += nums1[idx_1] break idx_1 += 1 idx_2 += 1 elif nums1[idx_1]", "None: break if idx_1 == len(nums1): if median[0] is not None and idx_1", "median_sum += nums2[idx_2 + 1] break if median[1] is not None and idx_1", "== median[1]: median_sum += nums2[idx_2] break idx_2 += 1 else: if median[0] is", "Solution(object): def findMedianSortedArrays(self, nums1, nums2): \"\"\" :type nums1: List[int] :type nums2: List[int] :rtype:", "not None and idx_1 + idx_2 == median[0] - 1: median_sum += nums1[idx_1]", "(nums1[median[0]] + nums1[median[1]]) / 2.0 else: return nums1[median[0]] median_sum = idx_1 = idx_2", "None: median_sum += nums2[idx_2] break if idx_1 + idx_2 == median[1] and median[1]", "# class Solution(object): def findMedianSortedArrays(self, nums1, nums2): \"\"\" :type nums1: List[int] :type nums2:", "idx_2 += 1 median[0] = None continue if median[1] is not None and", "idx_2 == median[0]: median_sum += nums2[idx_2] idx_2 += 1 median[0] = None continue", "idx_2 == median[1]: median_sum += nums1[idx_1] break idx_1 += 1 continue if nums1[idx_1]", "== len(nums2): if median[0] is not None and idx_1 + idx_2 == median[0]:", "if median[1] is not None: median_sum += nums1[idx_1 + 1] break if median[1]", "break idx_1 += 1 continue if nums1[idx_1] == nums2[idx_2]: if median[0] is not", "https://leetcode.com/problems/median-of-two-sorted-arrays/ # class Solution(object): def findMedianSortedArrays(self, nums1, nums2): \"\"\" :type nums1: List[int] :type", "and idx_1 + idx_2 == median[0]: median_sum += nums1[idx_1] idx_1 += 1 median[0]", "median[1] is not None: median_sum += nums2[idx_2] break if idx_1 + idx_2 ==", "% 2 == 0 else False median = [None, None] if is_even: median[1]", 
"idx_1 + idx_2 == median[1]: median_sum += nums1[idx_1] break idx_1 += 1 continue", "is not None: median_sum += nums2[idx_2] break if idx_1 + idx_2 == median[1]", "median_sum / 2.0 if is_even else median_sum s = Solution() s.findMedianSortedArrays([1, 2], [1,", "median[1]: median_sum += nums2[idx_2] break idx_2 += 1 continue if idx_2 == len(nums2):", "nums2[idx_2] break idx_2 += 1 continue if idx_2 == len(nums2): if median[0] is", "and median[1] is None: break if idx_1 == len(nums1): if median[0] is not", "idx_2 += 1 continue if idx_2 == len(nums2): if median[0] is not None", "median_sum += nums1[idx_1] break idx_1 += 1 return median_sum / 2.0 if is_even", "1 idx_2 += 1 elif nums1[idx_1] > nums2[idx_2]: if median[0] is not None", "median_sum += nums2[idx_2] idx_2 += 1 median[0] = None continue if median[1] is", "+= nums1[idx_1] if median[1] is not None: median_sum += nums1[idx_1 + 1] break", "= None if median[1] is not None: median_sum += nums2[idx_2] break if idx_1", "0: if is_even: return (nums1[median[0]] + nums1[median[1]]) / 2.0 else: return nums1[median[0]] median_sum", "= median[1] - 1 else: median[0] = int((total_len - 1) / 2) if", "nums1[idx_1 + 1] break if median[1] is not None and idx_1 + idx_2", "nums2[idx_2] break if idx_1 + idx_2 == median[1] and median[1] is not None:", "nums1 is None or nums2 is None: raise ValueError('Inputs should be arrays.') total_len", "median[0]: median_sum += nums2[idx_2] idx_2 += 1 median[0] = None continue if median[1]", "idx_1 += 1 median[0] = None continue if median[1] is not None and", "+= nums1[idx_1] break idx_1 += 1 return median_sum / 2.0 if is_even else", "== 0: if is_even: return (nums1[median[0]] + nums1[median[1]]) / 2.0 else: return nums1[median[0]]", "not None: median_sum += nums1[idx_1] break idx_1 += 1 idx_2 += 1 elif", "+ len(nums2) if total_len == 0: raise ValueError('Two arrays cannot both be empty.')", "== median[0]: median_sum += nums1[idx_1] idx_1 += 1 median[0] = None continue if", "1 elif nums1[idx_1] > nums2[idx_2]: if median[0] is not None and idx_1 +", "None: raise ValueError('Inputs should be arrays.') total_len = len(nums1) + len(nums2) if total_len", "+ 1] break if median[1] is not None and idx_1 + idx_2 ==", "+= nums1[idx_1] median[0] = None if median[1] is not None: median_sum += nums2[idx_2]", "is_even: median[1] = int(total_len / 2) median[0] = median[1] - 1 else: median[0]", "+= nums2[idx_2] break idx_2 += 1 continue if idx_2 == len(nums2): if median[0]", "median[0] = int((total_len - 1) / 2) if len(nums1) == 0: if is_even:", "List[int] :rtype: float \"\"\" if nums1 is None or nums2 is None: raise", "1 median[0] = None continue if median[1] is not None and idx_1 +", "nums1[idx_1] break idx_1 += 1 return median_sum / 2.0 if is_even else median_sum", "is None and median[1] is None: break if idx_1 == len(nums1): if median[0]", "None] if is_even: median[1] = int(total_len / 2) median[0] = median[1] - 1", "len(nums1) == 0: if is_even: return (nums2[median[0]] + nums2[median[1]]) / 2.0 else: return", "[None, None] if is_even: median[1] = int(total_len / 2) median[0] = median[1] -", "idx_2 += 1 else: if median[0] is not None and idx_1 + idx_2", "\"\"\" :type nums1: List[int] :type nums2: List[int] :rtype: float \"\"\" if nums1 is", "+= 1 continue if idx_2 == len(nums2): if median[0] is not None and", "arrays cannot both be empty.') is_even = True if total_len % 2 ==", "and idx_1 + idx_2 == median[0]: median_sum += nums1[idx_1] if median[1] is not", "None and idx_1 + idx_2 == median[0]: median_sum += nums2[idx_2] idx_2 += 
<reponame>leonard-sxy/algorithm-practice<gh_stars>1-10
#
## https://leetcode.com/problems/median-of-two-sorted-arrays/
#
class Solution(object):
    def findMedianSortedArrays(self, nums1, nums2):
        """
        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float
        """
        if nums1 is None or nums2 is None:
            raise ValueError('Inputs should be arrays.')

        total_len = len(nums1) + len(nums2)

        if total_len == 0:
            raise ValueError('Two arrays cannot both be empty.')

        is_even = True if total_len % 2 == 0 else False
        median = [None, None]

        if is_even:
            median[1] = int(total_len / 2)
            median[0] = median[1] - 1
        else:
            median[0] = int((total_len - 1) / 2)

        if len(nums1) == 0:
            if is_even:
                return (nums2[median[0]] + nums2[median[1]]) / 2.0
            else:
                return nums2[median[0]]

        if len(nums2) == 0:
            if is_even:
                return (nums1[median[0]] + nums1[median[1]]) / 2.0
            else:
                return nums1[median[0]]

        median_sum = idx_1 = idx_2 = 0

        while True:
            if median[0] is None and median[1] is None:
                break

            if idx_1 == len(nums1):
                if median[0] is not None and idx_1 + idx_2 == median[0]:
                    median_sum += nums2[idx_2]
                    if median[1] is not None:
                        median_sum += nums2[idx_2 + 1]
                    break
                if median[1] is not None and idx_1 + idx_2 == median[1]:
                    median_sum += nums2[idx_2]
                    break
                idx_2 += 1
                continue

            if idx_2 == len(nums2):
                if median[0] is not None and idx_1 + idx_2 == median[0]:
                    median_sum += nums1[idx_1]
                    if median[1] is not None:
                        median_sum += nums1[idx_1 + 1]
                    break
                if median[1] is not None and idx_1 + idx_2 == median[1]:
                    median_sum += nums1[idx_1]
                    break
                idx_1 += 1
                continue

            if nums1[idx_1] == nums2[idx_2]:
                if median[0] is not None and idx_1 + idx_2 == median[0] - 1:
                    median_sum += nums1[idx_1]
                    median[0] = None
                if idx_1 + idx_2 == median[0]:
                    median_sum += nums1[idx_1]
                    median[0] = None
                    if median[1] is not None:
                        median_sum += nums2[idx_2]
                    break
                if idx_1 + idx_2 == median[1] and median[1] is not None:
                    median_sum += nums1[idx_1]
                    break
                idx_1 += 1
                idx_2 += 1
            elif nums1[idx_1] > nums2[idx_2]:
                if median[0] is not None and idx_1 + idx_2 == median[0]:
                    median_sum += nums2[idx_2]
                    idx_2 += 1
                    median[0] = None
                    continue
                if median[1] is not None and idx_1 + idx_2 == median[1]:
                    median_sum += nums2[idx_2]
                    break
                idx_2 += 1
            else:
                if median[0] is not None and idx_1 + idx_2 == median[0]:
                    median_sum += nums1[idx_1]
                    idx_1 += 1
                    median[0] = None
                    continue
                if median[1] is not None and idx_1 + idx_2 == median[1]:
                    median_sum += nums1[idx_1]
                    break
                idx_1 += 1

        return median_sum / 2.0 if is_even else median_sum


s = Solution()
s.findMedianSortedArrays([1, 2], [1, 2])
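# Illustrative sanity check, not from the original solution file: the merge-walk
# above should agree with the brute-force definition of the median (sort the
# concatenation, take the middle element, or the mean of the two middle elements
# when the combined length is even). The helper name _naive_median is ours,
# purely for illustration.
def _naive_median(nums1, nums2):
    merged = sorted(nums1 + nums2)
    n = len(merged)
    if n % 2 == 1:
        return merged[n // 2]
    return (merged[n // 2 - 1] + merged[n // 2]) / 2.0


assert s.findMedianSortedArrays([1, 2], [1, 2]) == _naive_median([1, 2], [1, 2])  # 1.5
assert s.findMedianSortedArrays([1, 3], [2]) == _naive_median([1, 3], [2])        # 2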
[ "limitations under the License. \"\"\" import discord import json import os import requests", "member off server', ignore_extra=True, hidden=True, enabled=False) async def kick(self, ctx, member: discord.member, *,", "ctx): await ctx.channel.send(f\"Pong... {round(self.bot.latency * 1000)} ms\") @commands.command(name='server', description='Server information', help='Server information', ignore_extra=True,", "Utility(commands.Cog): def __init__(self, bot): self.bot = bot @commands.Cog.listener() async def on_ready(self): print('- Utility", "= bot @commands.Cog.listener() async def on_ready(self): print('- Utility Cog loaded') @commands.command(name='ban', description='Ban member", "role in member.roles]).replace('@', ''))}\\n\") @commands.command(name='members', description='Current Members', help='Current Members', ignore_extra=True, hidden=False, enabled=True) async", "f\"{member.name} (\" members += ', '.join([role.name for role in member.roles]) + \")\\n\" else:", "Description: {guild.description}\\n\" f\"Region: {guild.region}\\n\" f\"File Size Limit: {guild.filesize_limit} bytes\\n\\n\" f\"**Text Channels:**\\n - {text_channels}\\n\\n\"", "\"\"\" Bishbot - https://github.com/ldgregory/bishbot <NAME> <<EMAIL>> fun.py v0.1 Tested to Python v3.7.3 Description:", "in guild.members: if str(member.id) == str(mention.lstrip('<@!').rstrip('>')): await ctx.channel.send(f\"Nickname: {member.nick}\\n\" f\"Discord Name: {member.name}#{member.discriminator}\\n\" f\"Discord", "'Rain', '11': 'Thunderstorm', '13': 'Snow', '50': 'Mist'} aqius_mapping = {'Good': range(0, 50), 'Moderate':", "a human friendly # equivilant. data += f\"Air Quality Index: {jsonResponse['data']['current']['pollution']['aqius']} - \"", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "provided by abuseipdb.com**\\n\\n' for k, v in ip_info['data'].items(): data += f\"{k}: {v}\\n\" await", "New_Mexico\") @commands.command(name='whoami', description='Info about you', help='Info about you', ignore_extra=True, hidden=True, enabled=True) async def", "+= f\"{weather_mapping[jsonResponse['data']['current']['weather']['ic'][:-1]]}\\n\" # Print out the AQIUS then do a map to ranges", "use underscores instead of spaces in city or state, i.e. Sante_Fe New_Mexico\") @commands.command(name='whoami',", "description='Get abuse score for IP', help='Get abuse score for IP', ignore_extra=True, hidden=False, enabled=True)", "enabled=True) async def ping(self, ctx): await ctx.channel.send(f\"Pong... 
{round(self.bot.latency * 1000)} ms\") @commands.command(name='server', description='Server", "Channels:**\\n - {text_channels}\\n\\n\" f\"**Voice Channels:**\\n - {voice_channels}\\n\\n\" f\"**Server Members: {guild.member_count}**\\n - {members}\") @commands.command(name='unban',", "= { 'Accept': 'application/json', 'Key': ABUSEIPDB_KEY } response = requests.request(method='GET', url=url, headers=headers, params=querystring)", "Members: {guild.member_count}**\\n{members.replace('@', '')}\") @commands.command(name='nickname', description='Change nickname', help='Change nickname', ignore_extra=True, hidden=False, enabled=True) async def", "range(237, 259), 'W': range(260, 281), 'WNW': range(282, 304), 'NW': range(305, 326), 'NNW': range(327,", "nickname', ignore_extra=True, hidden=False, enabled=True) async def nickname(self, ctx, *, nickname): await ctx.author.edit(nick=f\"{nickname}\") #", "val: data += f\"{jsonResponse['data']['current']['weather']['wd']}° ({key})\\n\" await ctx.channel.send(data) else: await ctx.channel.send(f\"No data found. Make", "latency', help='Ping latency', ignore_extra=True, hidden=False, enabled=True) async def ping(self, ctx): await ctx.channel.send(f\"Pong... {round(self.bot.latency", "01d or 01n to differentiate day or night, we don't care # so", "f\"{k}: {v}\\n\" await ctx.channel.send(data) @commands.command(name='kick', description='Kick member off server', help='Kick member off server',", "Name: {member.name}#{member.discriminator}\\n\" f\"Discord ID: {member.id}\\n\" f\"Joined: {member.joined_at}\\n\" f\"Status: {member.status}\\n\" # f\"Is on Mobile:", "# such as conversion of C to F temps and wind_mapping where NNE", "License. \"\"\" import discord import json import os import requests from discord.ext import", "dotenv import load_dotenv load_dotenv() BOT_PREFIX = os.getenv('DISCORD_BOT_PREFIX') GUILD = os.getenv('DISCORD_GUILD') AIRVISUAL_KEY = os.getenv('AIRVISUAL_KEY')", "(\" members += ', '.join([role.name for role in member.roles]) + \")\\n\" else: members", "Role: {member.top_role}\\n\" f\"Roles: {str(', '.join([role.name for role in member.roles]).replace('@', ''))}\\n\") @commands.command(name='members', description='Current Members',", "ignore_extra=True, hidden=False, enabled=True) async def ping(self, ctx): await ctx.channel.send(f\"Pong... {round(self.bot.latency * 1000)} ms\")", "where NNE is # actually 11.25 - 33.75 degrees vs the ints required", "{int(jsonResponse['data']['current']['weather']['ws'] * 2.236936)} m/h from \" for key, val in wind_mapping.items(): if int(jsonResponse['data']['current']['weather']['wd'])", "License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "'Scattered Clouds', '04': 'Broken Clouds', '09': 'Shower Rain', '10': 'Rain', '11': 'Thunderstorm', '13':", "await ctx.channel.send(data) @commands.command(name='kick', description='Kick member off server', help='Kick member off server', ignore_extra=True, hidden=True,", "await member.ban(reason=reason) await ctx.send(f\"Banned {member.mention}\") @commands.command(name='clear', description='Clear x messages, defaults to 3', help='Clear", "== 'success': weather_mapping = {'01': 'Clear Sky', '02': 'Few Clouds', '03': 'Scattered Clouds',", "'Key': ABUSEIPDB_KEY } response = requests.request(method='GET', url=url, headers=headers, params=querystring) ip_info = json.loads(response.text) data", "'Unhealthy': range(151, 200), 'Very Unhealthy': range(201, 300), 'Hazardous': range(301, 500)} wind_mapping = {'North':", "124), 'SE': range(125, 146), 'SSE': range(147, 169), 'S': range(170, 191), 'SSW': range(192, 214),", "f\"Pressure: {jsonResponse['data']['current']['weather']['pr']} hPa\\n\" data += f\"Humidity: {jsonResponse['data']['current']['weather']['hu']}%\\n\" data += f\"Wind: {int(jsonResponse['data']['current']['weather']['ws'] * 2.236936)}", "commands for everyone Changelog: 20210606 - Fixed members command for intents 20200522 -", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the", "import commands from dotenv import load_dotenv load_dotenv() BOT_PREFIX = os.getenv('DISCORD_BOT_PREFIX') GUILD = os.getenv('DISCORD_GUILD')", "as conversion of C to F temps and wind_mapping where NNE is #", "url = f\"https://api.airvisual.com/v2/city?city={city.replace('_', '%20')}&state={state.replace('_', '%20')}&country=USA&key={AIRVISUAL_KEY}\" response = requests.request(method='GET', url=url) jsonResponse = json.loads(response.text) if", "description='Ban member from server', help='Ban member from server', ignore_extra=True, hidden=True, enabled=False) async def", "{'North': range(349, 360), 'N': range(0, 11), 'NNE': range(12, 34), 'NE': range(35, 56), 'ENE':", "val in wind_mapping.items(): if int(jsonResponse['data']['current']['weather']['wd']) in val: data += f\"{jsonResponse['data']['current']['weather']['wd']}° ({key})\\n\" await ctx.channel.send(data)", "34), 'NE': range(35, 56), 'ENE': range(57, 79), 'E': range(80, 101), 'ESE': range(102, 124),", "Tested to Python v3.7.3 Description: Fun commands for everyone Changelog: 20210606 - Fixed", "#! /usr/bin/env python3 \"\"\" Bishbot - https://github.com/ldgregory/bishbot <NAME> <<EMAIL>> fun.py v0.1 Tested to", "Rain', '10': 'Rain', '11': 'Thunderstorm', '13': 'Snow', '50': 'Mist'} aqius_mapping = {'Good': range(0,", "Python v3.7.3 Description: Fun commands for everyone Changelog: 20210606 - Fixed members command", "member in guild.members]) await ctx.channel.send(f\"Server Name: {guild.name} (ID: {guild.id})\\n\" f\"Server Owner: {guild.owner} (ID:", "discord.member, *, reason=None): if member.hasPermission('BAN_MEMBERS'): await member.ban(reason=reason) await ctx.send(f\"Banned {member.mention}\") @commands.command(name='clear', description='Clear x", "await ctx.channel.send(f\"Pong... 
{round(self.bot.latency * 1000)} ms\") @commands.command(name='server', description='Server information', help='Server information', ignore_extra=True, hidden=False,", "information', ignore_extra=True, hidden=False, enabled=True) async def member(self, ctx, mention): guild = discord.utils.get(self.bot.guilds, name=GUILD)", "x messages, defaults to 3', ignore_extra=True, hidden=False, enabled=True) @commands.has_permissions(manage_messages=True) async def clear(self, ctx,", "Groups': range(101, 150), 'Unhealthy': range(151, 200), 'Very Unhealthy': range(201, 300), 'Hazardous': range(301, 500)}", "'WNW': range(282, 304), 'NW': range(305, 326), 'NNW': range(327, 348)} data = f\"{jsonResponse['data']['city']}, {jsonResponse['data']['state']},", "str(mention.lstrip('<@!').rstrip('>')): await ctx.channel.send(f\"Nickname: {member.nick}\\n\" f\"Discord Name: {member.name}#{member.discriminator}\\n\" f\"Discord ID: {member.id}\\n\" f\"Joined: {member.joined_at}\\n\" f\"Status:", "Limit: {guild.filesize_limit} bytes\\n\\n\" f\"**Text Channels:**\\n - {text_channels}\\n\\n\" f\"**Voice Channels:**\\n - {voice_channels}\\n\\n\" f\"**Server Members:", "the License for the specific language governing permissions and limitations under the License.", "+= f\"{key}\\n\" data += f\"Temperature: {int(((jsonResponse['data']['current']['weather']['tp']) * 9) / 5) + 32}°F\\n\" data", "guild.members: if str(member.id) == str(mention.lstrip('<@!').rstrip('>')): await ctx.channel.send(f\"Nickname: {member.nick}\\n\" f\"Discord Name: {member.name}#{member.discriminator}\\n\" f\"Discord ID:", "360), 'N': range(0, 11), 'NNE': range(12, 34), 'NE': range(35, 56), 'ENE': range(57, 79),", "{ 'Accept': 'application/json', 'Key': ABUSEIPDB_KEY } response = requests.request(method='GET', url=url, headers=headers, params=querystring) ip_info", "ctx, *, nickname): await ctx.author.edit(nick=f\"{nickname}\") # role = get(ctx.message.server.roles, name='ROLE_NAME') # if role:", "server', ignore_extra=True, hidden=True, enabled=False) async def kick(self, ctx, member: discord.member, *, reason=None): if", "help='Change nickname', ignore_extra=True, hidden=False, enabled=True) async def nickname(self, ctx, *, nickname): await ctx.author.edit(nick=f\"{nickname}\")", "are some accuracy issues # such as conversion of C to F temps", "help='Server information', ignore_extra=True, hidden=False, enabled=True) @commands.has_role('admins') async def server(self, ctx): guild = discord.utils.get(self.bot.guilds,", "License for the specific language governing permissions and limitations under the License. \"\"\"", "ignore_extra=True, hidden=False, enabled=True) @commands.has_role('admins') async def server(self, ctx): guild = discord.utils.get(self.bot.guilds, name=GUILD) text_channels", "Unless required by applicable law or agreed to in writing, software distributed under", "server', ignore_extra=True, hidden=True, enabled=False) async def ban(self, ctx, member: discord.member, *, reason=None): if", "score for IP', help='Get abuse score for IP', ignore_extra=True, hidden=False, enabled=True) async def", "{user.mention}\") return @commands.command(name='weather', description='Weather as !weather CITY STATE', help='Weather as !weather Santa_Fe New_Mexico',", "differentiate day or night, we don't care # so we're just mapping the", "mapping the numerical part to the human friendly text version. 
data += f\"{weather_mapping[jsonResponse['data']['current']['weather']['ic'][:-1]]}\\n\"", "through like 01d or 01n to differentiate day or night, we don't care", "https://github.com/ldgregory/bishbot <NAME> <<EMAIL>> fun.py v0.1 Tested to Python v3.7.3 Description: Fun commands for", "Members: {guild.member_count}**\\n - {members}\") @commands.command(name='unban', description='Unban member from server', help='Unban member from server',", "100), 'Unhealthy for Sensitive Groups': range(101, 150), 'Unhealthy': range(151, 200), 'Very Unhealthy': range(201,", "range(170, 191), 'SSW': range(192, 214), 'SW': range(215, 236), 'WSW': range(237, 259), 'W': range(260,", "abuseipdb.com**\\n\\n' for k, v in ip_info['data'].items(): data += f\"{k}: {v}\\n\" await ctx.channel.send(data) @commands.command(name='kick',", "{member.guild}\\n\" f\"Guild Permissions: {member.guild_permissions}\\n\" f\"Top Role: {member.top_role}\\n\" f\"Roles: {str(', '.join([role.name for role in", "= '\\n - '.join([channel.name for channel in guild.voice_channels]) members = '\\n - '.join([member.name", "ignore_extra=True, hidden=True, enabled=False) async def unban(self, ctx, *, member): banned_users = await ctx.guild.bans()", "member): banned_users = await ctx.guild.bans() member_name, member_discriminator = member.split('#') for ban_entry in banned_users:", "ctx, city, state): # This intended as a 'good enough' tool. There are", "we don't care # so we're just mapping the numerical part to the", "hidden=False, enabled=True) async def nickname(self, ctx, *, nickname): await ctx.author.edit(nick=f\"{nickname}\") # role =", "def __init__(self, bot): self.bot = bot @commands.Cog.listener() async def on_ready(self): print('- Utility Cog", "def clear(self, ctx, amount=3): await ctx.channel.purge(limit=amount) @commands.command(name='ip_abuse', description='Get abuse score for IP', help='Get", "the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "= {'North': range(349, 360), 'N': range(0, 11), 'NNE': range(12, 34), 'NE': range(35, 56),", "day or night, we don't care # so we're just mapping the numerical", "{member.guild_permissions}\\n\" f\"Top Role: {member.top_role}\\n\" f\"Roles: {str(', '.join([role.name for role in member.roles]).replace('@', ''))}\\n\") @commands.command(name='members',", "License, Version 2.0 (the \"License\"); you may not use this file except in", "channel in guild.voice_channels]) members = '\\n - '.join([member.name for member in guild.members]) await", "= json.loads(response.text) data = '**Data provided by abuseipdb.com**\\n\\n' for k, v in ip_info['data'].items():", "Clouds', '03': 'Scattered Clouds', '04': 'Broken Clouds', '09': 'Shower Rain', '10': 'Rain', '11':", "member.roles]) + \")\\n\" else: members += f\"- {member.display_name}\\n\" await ctx.channel.send(f\"**Server Members: {guild.member_count}**\\n{members.replace('@', '')}\")", "f\"https://api.airvisual.com/v2/city?city={city.replace('_', '%20')}&state={state.replace('_', '%20')}&country=USA&key={AIRVISUAL_KEY}\" response = requests.request(method='GET', url=url) jsonResponse = json.loads(response.text) if jsonResponse['status'] ==", "in guild.members]) await ctx.channel.send(f\"Server Name: {guild.name} (ID: {guild.id})\\n\" f\"Server Owner: {guild.owner} (ID: {guild.owner_id})\\n\"", "range(51, 100), 'Unhealthy for Sensitive Groups': range(101, 150), 'Unhealthy': range(151, 200), 'Very Unhealthy':", "member_discriminator = member.split('#') for ban_entry in banned_users: user = ban_entry.user if 
(user.name, user.discriminator)", "({key})\\n\" await ctx.channel.send(data) else: await ctx.channel.send(f\"No data found. Make sure to use underscores", "async def member(self, ctx, mention): guild = discord.utils.get(self.bot.guilds, name=GUILD) for member in guild.members:", "in guild.members: if showall == 'showall': members += f\"- {member.display_name} : \" members", "Mobile: {member.is_on_mobile}\\n\" # f\"Activity: {str(member.activity.type).lstrip('ActivityType.')} {member.activity.name}\\n\" f\"Guild: {member.guild}\\n\" f\"Guild Permissions: {member.guild_permissions}\\n\" f\"Top Role:", "json.loads(response.text) if jsonResponse['status'] == 'success': weather_mapping = {'01': 'Clear Sky', '02': 'Few Clouds',", "members(self, ctx, showall=None): guild = discord.utils.get(self.bot.guilds, name=GUILD) members = '' for member in", "'success': weather_mapping = {'01': 'Clear Sky', '02': 'Few Clouds', '03': 'Scattered Clouds', '04':", "\")\\n\" else: members += f\"- {member.display_name}\\n\" await ctx.channel.send(f\"**Server Members: {guild.member_count}**\\n{members.replace('@', '')}\") @commands.command(name='nickname', description='Change", "async def on_ready(self): print('- Utility Cog loaded') @commands.command(name='ban', description='Ban member from server', help='Ban", "'\\n - '.join([member.name for member in guild.members]) await ctx.channel.send(f\"Server Name: {guild.name} (ID: {guild.id})\\n\"", "async def kick(self, ctx, member: discord.member, *, reason=None): if member.hasPermission('KICK_MEMBERS'): await member.kick(reason=reason) await", "f\"- {member.display_name} : \" members += f\"{member.name} (\" members += ', '.join([role.name for", "def members(self, ctx, showall=None): guild = discord.utils.get(self.bot.guilds, name=GUILD) members = '' for member", "member from server', help='Unban member from server', ignore_extra=True, hidden=True, enabled=False) async def unban(self,", "Sensitive Groups': range(101, 150), 'Unhealthy': range(151, 200), 'Very Unhealthy': range(201, 300), 'Hazardous': range(301,", "{jsonResponse['data']['state']}, {jsonResponse['data']['country']}\\n\" data += f\"{jsonResponse['data']['location']['coordinates']}\\n\" # ic comes through like 01d or 01n", "f\"Guild: {member.guild}\\n\" f\"Guild Permissions: {member.guild_permissions}\\n\" f\"Top Role: {member.top_role}\\n\" f\"Roles: {str(', '.join([role.name for role", "to the human friendly text version. data += f\"{weather_mapping[jsonResponse['data']['current']['weather']['ic'][:-1]]}\\n\" # Print out the", "for the specific language governing permissions and limitations under the License. 
\"\"\" import", "role in member.roles]) + \")\\n\" else: members += f\"- {member.display_name}\\n\" await ctx.channel.send(f\"**Server Members:", "in banned_users: user = ban_entry.user if (user.name, user.discriminator) == (member_name, member_discriminator): await ctx.guild.unban(user)", "async def nickname(self, ctx, *, nickname): await ctx.author.edit(nick=f\"{nickname}\") # role = get(ctx.message.server.roles, name='ROLE_NAME')", "json import os import requests from discord.ext import commands from dotenv import load_dotenv", "'.join([role.name for role in member.roles]).replace('@', ''))}\\n\") @commands.command(name='members', description='Current Members', help='Current Members', ignore_extra=True, hidden=False,", "jsonResponse = json.loads(response.text) if jsonResponse['status'] == 'success': weather_mapping = {'01': 'Clear Sky', '02':", "ctx.channel.send(f\"Nickname: {member.nick}\\n\" f\"Discord Name: {member.name}#{member.discriminator}\\n\" f\"Discord ID: {member.id}\\n\" f\"Joined: {member.joined_at}\\n\" f\"Status: {member.status}\\n\" #", "async def clear(self, ctx, amount=3): await ctx.channel.purge(limit=amount) @commands.command(name='ip_abuse', description='Get abuse score for IP',", "@commands.command(name='kick', description='Kick member off server', help='Kick member off server', ignore_extra=True, hidden=True, enabled=False) async", "human friendly # equivilant. data += f\"Air Quality Index: {jsonResponse['data']['current']['pollution']['aqius']} - \" for", "'Mist'} aqius_mapping = {'Good': range(0, 50), 'Moderate': range(51, 100), 'Unhealthy for Sensitive Groups':", "members += f\"- {member.display_name} : \" members += f\"{member.name} (\" members += ',", "Copyright 2020 <NAME> Licensed under the Apache License, Version 2.0 (the \"License\"); you", "mention): guild = discord.utils.get(self.bot.guilds, name=GUILD) for member in guild.members: if str(member.id) == str(mention.lstrip('<@!').rstrip('>')):", "*, reason=None): if member.hasPermission('BAN_MEMBERS'): await member.ban(reason=reason) await ctx.send(f\"Banned {member.mention}\") @commands.command(name='clear', description='Clear x messages,", "ban_entry.user if (user.name, user.discriminator) == (member_name, member_discriminator): await ctx.guild.unban(user) await ctx.send(f\"Unbanned {user.mention}\") return", "= discord.utils.get(self.bot.guilds, name=GUILD) text_channels = '\\n - '.join([channel.name for channel in guild.text_channels]) voice_channels", "{voice_channels}\\n\\n\" f\"**Server Members: {guild.member_count}**\\n - {members}\") @commands.command(name='unban', description='Unban member from server', help='Unban member", "async def members(self, ctx, showall=None): guild = discord.utils.get(self.bot.guilds, name=GUILD) members = '' for", "software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "by applicable law or agreed to in writing, software distributed under the License", "member(self, ctx, mention): guild = discord.utils.get(self.bot.guilds, name=GUILD) for member in guild.members: if str(member.id)", "print out a human friendly # equivilant. 
data += f\"Air Quality Index: {jsonResponse['data']['current']['pollution']['aqius']}", "'**Data provided by abuseipdb.com**\\n\\n' for k, v in ip_info['data'].items(): data += f\"{k}: {v}\\n\"", "Bishbot - https://github.com/ldgregory/bishbot <NAME> <<EMAIL>> fun.py v0.1 Tested to Python v3.7.3 Description: Fun", "async def weather(self, ctx, city, state): # This intended as a 'good enough'", "'Thunderstorm', '13': 'Snow', '50': 'Mist'} aqius_mapping = {'Good': range(0, 50), 'Moderate': range(51, 100),", "like 01d or 01n to differentiate day or night, we don't care #", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License", "= requests.request(method='GET', url=url, headers=headers, params=querystring) ip_info = json.loads(response.text) data = '**Data provided by", "ip_info = json.loads(response.text) data = '**Data provided by abuseipdb.com**\\n\\n' for k, v in", "300), 'Hazardous': range(301, 500)} wind_mapping = {'North': range(349, 360), 'N': range(0, 11), 'NNE':", "await ctx.channel.purge(limit=amount) @commands.command(name='ip_abuse', description='Get abuse score for IP', help='Get abuse score for IP',", "F temps and wind_mapping where NNE is # actually 11.25 - 33.75 degrees", "@commands.has_role('admins') async def server(self, ctx): guild = discord.utils.get(self.bot.guilds, name=GUILD) text_channels = '\\n -", "New_Mexico', ignore_extra=True, hidden=True, enabled=True) async def weather(self, ctx, city, state): # This intended", "- '.join([channel.name for channel in guild.voice_channels]) members = '\\n - '.join([member.name for member", "guild.members]) await ctx.channel.send(f\"Server Name: {guild.name} (ID: {guild.id})\\n\" f\"Server Owner: {guild.owner} (ID: {guild.owner_id})\\n\" f\"Server", "in member.roles]) + \")\\n\" else: members += f\"- {member.display_name}\\n\" await ctx.channel.send(f\"**Server Members: {guild.member_count}**\\n{members.replace('@',", "range(0, 50), 'Moderate': range(51, 100), 'Unhealthy for Sensitive Groups': range(101, 150), 'Unhealthy': range(151,", "help='Unban member from server', ignore_extra=True, hidden=True, enabled=False) async def unban(self, ctx, *, member):", "ctx.send(f\"Unbanned {user.mention}\") return @commands.command(name='weather', description='Weather as !weather CITY STATE', help='Weather as !weather Santa_Fe", "ctx, *, member): banned_users = await ctx.guild.bans() member_name, member_discriminator = member.split('#') for ban_entry", "def ban(self, ctx, member: discord.member, *, reason=None): if member.hasPermission('BAN_MEMBERS'): await member.ban(reason=reason) await ctx.send(f\"Banned", "BOT_PREFIX = os.getenv('DISCORD_BOT_PREFIX') GUILD = os.getenv('DISCORD_GUILD') AIRVISUAL_KEY = os.getenv('AIRVISUAL_KEY') class Utility(commands.Cog): def __init__(self,", "Santa_Fe New_Mexico', ignore_extra=True, hidden=True, enabled=True) async def weather(self, ctx, city, state): # This", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "guild.voice_channels]) members = '\\n - '.join([member.name for member in guild.members]) await ctx.channel.send(f\"Server Name:", "Clouds', '04': 'Broken Clouds', '09': 'Shower Rain', '10': 'Rain', '11': 'Thunderstorm', '13': 'Snow',", "ignore_extra=True, hidden=False, enabled=True) @commands.has_permissions(manage_messages=True) async def clear(self, ctx, amount=3): await ctx.channel.purge(limit=amount) @commands.command(name='ip_abuse', description='Get", "in compliance with the License. 
You may obtain a copy of the License", "enabled=True) @commands.has_permissions(manage_messages=True) async def clear(self, ctx, amount=3): await ctx.channel.purge(limit=amount) @commands.command(name='ip_abuse', description='Get abuse score", "ctx.channel.purge(limit=amount) @commands.command(name='ip_abuse', description='Get abuse score for IP', help='Get abuse score for IP', ignore_extra=True,", "(ID: {guild.id})\\n\" f\"Server Owner: {guild.owner} (ID: {guild.owner_id})\\n\" f\"Server Description: {guild.description}\\n\" f\"Region: {guild.region}\\n\" f\"File", "'Hazardous': range(301, 500)} wind_mapping = {'North': range(349, 360), 'N': range(0, 11), 'NNE': range(12,", "help='Info about you', ignore_extra=True, hidden=True, enabled=True) async def whoami(self, ctx): await ctx.channel.send(f\"User Name:", "KIND, either express or implied. See the License for the specific language governing", "- Initial code Copyright 2020 <NAME> Licensed under the Apache License, Version 2.0", "and print out a human friendly # equivilant. data += f\"Air Quality Index:", "{guild.filesize_limit} bytes\\n\\n\" f\"**Text Channels:**\\n - {text_channels}\\n\\n\" f\"**Voice Channels:**\\n - {voice_channels}\\n\\n\" f\"**Server Members: {guild.member_count}**\\n", "os.getenv('DISCORD_GUILD') AIRVISUAL_KEY = os.getenv('AIRVISUAL_KEY') class Utility(commands.Cog): def __init__(self, bot): self.bot = bot @commands.Cog.listener()", "score for IP', ignore_extra=True, hidden=False, enabled=True) async def ip_abuse(self, ctx, ipAddress): ABUSEIPDB_KEY =", "for key, val in wind_mapping.items(): if int(jsonResponse['data']['current']['weather']['wd']) in val: data += f\"{jsonResponse['data']['current']['weather']['wd']}° ({key})\\n\"", "under the License. \"\"\" import discord import json import os import requests from", "requests.request(method='GET', url=url) jsonResponse = json.loads(response.text) if jsonResponse['status'] == 'success': weather_mapping = {'01': 'Clear", "in writing, software distributed under the License is distributed on an \"AS IS\"", "# if role: # If get could find the role # await client.add_role(ctx.message.author,", "'WSW': range(237, 259), 'W': range(260, 281), 'WNW': range(282, 304), 'NW': range(305, 326), 'NNW':", "of spaces in city or state, i.e. Sante_Fe New_Mexico\") @commands.command(name='whoami', description='Info about you',", "This intended as a 'good enough' tool. There are some accuracy issues #", "degrees vs the ints required by range(). url = f\"https://api.airvisual.com/v2/city?city={city.replace('_', '%20')}&state={state.replace('_', '%20')}&country=USA&key={AIRVISUAL_KEY}\" response", "writing, software distributed under the License is distributed on an \"AS IS\" BASIS,", "= {'01': 'Clear Sky', '02': 'Few Clouds', '03': 'Scattered Clouds', '04': 'Broken Clouds',", "ctx.author.edit(nick=f\"{nickname}\") # role = get(ctx.message.server.roles, name='ROLE_NAME') # if role: # If get could", "actually 11.25 - 33.75 degrees vs the ints required by range(). url =", "33.75 degrees vs the ints required by range(). 
url = f\"https://api.airvisual.com/v2/city?city={city.replace('_', '%20')}&state={state.replace('_', '%20')}&country=USA&key={AIRVISUAL_KEY}\"", "if jsonResponse['status'] == 'success': weather_mapping = {'01': 'Clear Sky', '02': 'Few Clouds', '03':", "aqius_mapping.items(): if int(jsonResponse['data']['current']['pollution']['aqius']) in val: data += f\"{key}\\n\" data += f\"Temperature: {int(((jsonResponse['data']['current']['weather']['tp']) *", "{int(((jsonResponse['data']['current']['weather']['tp']) * 9) / 5) + 32}°F\\n\" data += f\"Pressure: {jsonResponse['data']['current']['weather']['pr']} hPa\\n\" data", "weather_mapping = {'01': 'Clear Sky', '02': 'Few Clouds', '03': 'Scattered Clouds', '04': 'Broken", "ID: {member.id}\\n\" f\"Joined: {member.joined_at}\\n\" f\"Status: {member.status}\\n\" # f\"Is on Mobile: {member.is_on_mobile}\\n\" # f\"Activity:", "# so we're just mapping the numerical part to the human friendly text", "role: # If get could find the role # await client.add_role(ctx.message.author, role) @commands.command(name='ping',", "or agreed to in writing, software distributed under the License is distributed on", "def nickname(self, ctx, *, nickname): await ctx.author.edit(nick=f\"{nickname}\") # role = get(ctx.message.server.roles, name='ROLE_NAME') #", "you', ignore_extra=True, hidden=True, enabled=True) async def whoami(self, ctx): await ctx.channel.send(f\"User Name: {ctx.author.name}\\nUser ID:", "if member.hasPermission('BAN_MEMBERS'): await member.ban(reason=reason) await ctx.send(f\"Banned {member.mention}\") @commands.command(name='clear', description='Clear x messages, defaults to", "await ctx.channel.send(data) else: await ctx.channel.send(f\"No data found. Make sure to use underscores instead", "out a human friendly # equivilant. 
data += f\"Air Quality Index: {jsonResponse['data']['current']['pollution']['aqius']} -", "members = '\\n - '.join([member.name for member in guild.members]) await ctx.channel.send(f\"Server Name: {guild.name}", "'09': 'Shower Rain', '10': 'Rain', '11': 'Thunderstorm', '13': 'Snow', '50': 'Mist'} aqius_mapping =", "ignore_extra=True, hidden=False, enabled=True) async def ip_abuse(self, ctx, ipAddress): ABUSEIPDB_KEY = os.getenv('ABUSEIPDB_KEY') url =", "= discord.utils.get(self.bot.guilds, name=GUILD) for member in guild.members: if str(member.id) == str(mention.lstrip('<@!').rstrip('>')): await ctx.channel.send(f\"Nickname:", "description='Ping latency', help='Ping latency', ignore_extra=True, hidden=False, enabled=True) async def ping(self, ctx): await ctx.channel.send(f\"Pong...", "nickname(self, ctx, *, nickname): await ctx.author.edit(nick=f\"{nickname}\") # role = get(ctx.message.server.roles, name='ROLE_NAME') # if", "@commands.command(name='ping', description='Ping latency', help='Ping latency', ignore_extra=True, hidden=False, enabled=True) async def ping(self, ctx): await", "'S': range(170, 191), 'SSW': range(192, 214), 'SW': range(215, 236), 'WSW': range(237, 259), 'W':", "'%20')}&state={state.replace('_', '%20')}&country=USA&key={AIRVISUAL_KEY}\" response = requests.request(method='GET', url=url) jsonResponse = json.loads(response.text) if jsonResponse['status'] == 'success':", "guild.members: if showall == 'showall': members += f\"- {member.display_name} : \" members +=", "defaults to 3', help='Clear x messages, defaults to 3', ignore_extra=True, hidden=False, enabled=True) @commands.has_permissions(manage_messages=True)", "for intents 20200522 - Initial code Copyright 2020 <NAME> Licensed under the Apache", "range(301, 500)} wind_mapping = {'North': range(349, 360), 'N': range(0, 11), 'NNE': range(12, 34),", "from server', help='Ban member from server', ignore_extra=True, hidden=True, enabled=False) async def ban(self, ctx,", "member from server', ignore_extra=True, hidden=True, enabled=False) async def unban(self, ctx, *, member): banned_users", "'Few Clouds', '03': 'Scattered Clouds', '04': 'Broken Clouds', '09': 'Shower Rain', '10': 'Rain',", "- 33.75 degrees vs the ints required by range(). 
url = f\"https://api.airvisual.com/v2/city?city={city.replace('_', '%20')}&state={state.replace('_',", "hidden=True, enabled=True) async def weather(self, ctx, city, state): # This intended as a", "', '.join([role.name for role in member.roles]) + \")\\n\" else: members += f\"- {member.display_name}\\n\"", "help='Member information', ignore_extra=True, hidden=False, enabled=True) async def member(self, ctx, mention): guild = discord.utils.get(self.bot.guilds,", "{member.nick}\\n\" f\"Discord Name: {member.name}#{member.discriminator}\\n\" f\"Discord ID: {member.id}\\n\" f\"Joined: {member.joined_at}\\n\" f\"Status: {member.status}\\n\" # f\"Is", "import discord import json import os import requests from discord.ext import commands from", "jsonResponse['status'] == 'success': weather_mapping = {'01': 'Clear Sky', '02': 'Few Clouds', '03': 'Scattered", "user.discriminator) == (member_name, member_discriminator): await ctx.guild.unban(user) await ctx.send(f\"Unbanned {user.mention}\") return @commands.command(name='weather', description='Weather as", "''))}\\n\") @commands.command(name='members', description='Current Members', help='Current Members', ignore_extra=True, hidden=False, enabled=True) async def members(self, ctx,", "'')}\") @commands.command(name='nickname', description='Change nickname', help='Change nickname', ignore_extra=True, hidden=False, enabled=True) async def nickname(self, ctx,", "server', help='Ban member from server', ignore_extra=True, hidden=True, enabled=False) async def ban(self, ctx, member:", "wind_mapping where NNE is # actually 11.25 - 33.75 degrees vs the ints", "channel in guild.text_channels]) voice_channels = '\\n - '.join([channel.name for channel in guild.voice_channels]) members", "326), 'NNW': range(327, 348)} data = f\"{jsonResponse['data']['city']}, {jsonResponse['data']['state']}, {jsonResponse['data']['country']}\\n\" data += f\"{jsonResponse['data']['location']['coordinates']}\\n\" #", "+= f\"Humidity: {jsonResponse['data']['current']['weather']['hu']}%\\n\" data += f\"Wind: {int(jsonResponse['data']['current']['weather']['ws'] * 2.236936)} m/h from \" for", "def kick(self, ctx, member: discord.member, *, reason=None): if member.hasPermission('KICK_MEMBERS'): await member.kick(reason=reason) await ctx.send(f\"Kicked", "OR CONDITIONS OF ANY KIND, either express or implied. See the License for", "{guild.member_count}**\\n - {members}\") @commands.command(name='unban', description='Unban member from server', help='Unban member from server', ignore_extra=True,", "OF ANY KIND, either express or implied. See the License for the specific", "the ints required by range(). 
url = f\"https://api.airvisual.com/v2/city?city={city.replace('_', '%20')}&state={state.replace('_', '%20')}&country=USA&key={AIRVISUAL_KEY}\" response = requests.request(method='GET',", "members command for intents 20200522 - Initial code Copyright 2020 <NAME> Licensed under", "enabled=True) async def nickname(self, ctx, *, nickname): await ctx.author.edit(nick=f\"{nickname}\") # role = get(ctx.message.server.roles,", "headers=headers, params=querystring) ip_info = json.loads(response.text) data = '**Data provided by abuseipdb.com**\\n\\n' for k,", "in member.roles]).replace('@', ''))}\\n\") @commands.command(name='members', description='Current Members', help='Current Members', ignore_extra=True, hidden=False, enabled=True) async def", "= get(ctx.message.server.roles, name='ROLE_NAME') # if role: # If get could find the role", "import os import requests from discord.ext import commands from dotenv import load_dotenv load_dotenv()", "await ctx.send(f\"Kicked {member.mention}\") @commands.command(name='member', description='Member information', help='Member information', ignore_extra=True, hidden=False, enabled=True) async def", "{guild.region}\\n\" f\"File Size Limit: {guild.filesize_limit} bytes\\n\\n\" f\"**Text Channels:**\\n - {text_channels}\\n\\n\" f\"**Voice Channels:**\\n -", "# role = get(ctx.message.server.roles, name='ROLE_NAME') # if role: # If get could find", "259), 'W': range(260, 281), 'WNW': range(282, 304), 'NW': range(305, 326), 'NNW': range(327, 348)}", "# ic comes through like 01d or 01n to differentiate day or night,", "hPa\\n\" data += f\"Humidity: {jsonResponse['data']['current']['weather']['hu']}%\\n\" data += f\"Wind: {int(jsonResponse['data']['current']['weather']['ws'] * 2.236936)} m/h from", "then do a map to ranges and print out a human friendly #", "may not use this file except in compliance with the License. 
You may", "{jsonResponse['data']['country']}\\n\" data += f\"{jsonResponse['data']['location']['coordinates']}\\n\" # ic comes through like 01d or 01n to", "for role in member.roles]).replace('@', ''))}\\n\") @commands.command(name='members', description='Current Members', help='Current Members', ignore_extra=True, hidden=False, enabled=True)", "'Shower Rain', '10': 'Rain', '11': 'Thunderstorm', '13': 'Snow', '50': 'Mist'} aqius_mapping = {'Good':", "ctx.channel.send(f\"**Server Members: {guild.member_count}**\\n{members.replace('@', '')}\") @commands.command(name='nickname', description='Change nickname', help='Change nickname', ignore_extra=True, hidden=False, enabled=True) async", "if member.hasPermission('KICK_MEMBERS'): await member.kick(reason=reason) await ctx.send(f\"Kicked {member.mention}\") @commands.command(name='member', description='Member information', help='Member information', ignore_extra=True,", "== 'showall': members += f\"- {member.display_name} : \" members += f\"{member.name} (\" members", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "ipAddress, 'maxAgeInDays': '90' } headers = { 'Accept': 'application/json', 'Key': ABUSEIPDB_KEY } response", "await ctx.send(f\"Banned {member.mention}\") @commands.command(name='clear', description='Clear x messages, defaults to 3', help='Clear x messages,", "url=url, headers=headers, params=querystring) ip_info = json.loads(response.text) data = '**Data provided by abuseipdb.com**\\n\\n' for", "ctx): guild = discord.utils.get(self.bot.guilds, name=GUILD) text_channels = '\\n - '.join([channel.name for channel in", "latency', ignore_extra=True, hidden=False, enabled=True) async def ping(self, ctx): await ctx.channel.send(f\"Pong... {round(self.bot.latency * 1000)}", "members += f\"{member.name} (\" members += ', '.join([role.name for role in member.roles]) +", "description='Current Members', help='Current Members', ignore_extra=True, hidden=False, enabled=True) async def members(self, ctx, showall=None): guild", "Owner: {guild.owner} (ID: {guild.owner_id})\\n\" f\"Server Description: {guild.description}\\n\" f\"Region: {guild.region}\\n\" f\"File Size Limit: {guild.filesize_limit}", "- {text_channels}\\n\\n\" f\"**Voice Channels:**\\n - {voice_channels}\\n\\n\" f\"**Server Members: {guild.member_count}**\\n - {members}\") @commands.command(name='unban', description='Unban", "'' for member in guild.members: if showall == 'showall': members += f\"- {member.display_name}", "numerical part to the human friendly text version. 
data += f\"{weather_mapping[jsonResponse['data']['current']['weather']['ic'][:-1]]}\\n\" # Print", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "} response = requests.request(method='GET', url=url, headers=headers, params=querystring) ip_info = json.loads(response.text) data = '**Data", "# f\"Activity: {str(member.activity.type).lstrip('ActivityType.')} {member.activity.name}\\n\" f\"Guild: {member.guild}\\n\" f\"Guild Permissions: {member.guild_permissions}\\n\" f\"Top Role: {member.top_role}\\n\" f\"Roles:", "'02': 'Few Clouds', '03': 'Scattered Clouds', '04': 'Broken Clouds', '09': 'Shower Rain', '10':", "for k, v in ip_info['data'].items(): data += f\"{k}: {v}\\n\" await ctx.channel.send(data) @commands.command(name='kick', description='Kick", "role = get(ctx.message.server.roles, name='ROLE_NAME') # if role: # If get could find the", "Sante_Fe New_Mexico\") @commands.command(name='whoami', description='Info about you', help='Info about you', ignore_extra=True, hidden=True, enabled=True) async", "data += f\"{k}: {v}\\n\" await ctx.channel.send(data) @commands.command(name='kick', description='Kick member off server', help='Kick member", "on Mobile: {member.is_on_mobile}\\n\" # f\"Activity: {str(member.activity.type).lstrip('ActivityType.')} {member.activity.name}\\n\" f\"Guild: {member.guild}\\n\" f\"Guild Permissions: {member.guild_permissions}\\n\" f\"Top", "care # so we're just mapping the numerical part to the human friendly", "discord.ext import commands from dotenv import load_dotenv load_dotenv() BOT_PREFIX = os.getenv('DISCORD_BOT_PREFIX') GUILD =", "range(0, 11), 'NNE': range(12, 34), 'NE': range(35, 56), 'ENE': range(57, 79), 'E': range(80,", "'NE': range(35, 56), 'ENE': range(57, 79), 'E': range(80, 101), 'ESE': range(102, 124), 'SE':", "from server', ignore_extra=True, hidden=True, enabled=False) async def unban(self, ctx, *, member): banned_users =", "'50': 'Mist'} aqius_mapping = {'Good': range(0, 50), 'Moderate': range(51, 100), 'Unhealthy for Sensitive", "or night, we don't care # so we're just mapping the numerical part", "!weather CITY STATE', help='Weather as !weather Santa_Fe New_Mexico', ignore_extra=True, hidden=True, enabled=True) async def", "'N': range(0, 11), 'NNE': range(12, 34), 'NE': range(35, 56), 'ENE': range(57, 79), 'E':", "load_dotenv() BOT_PREFIX = os.getenv('DISCORD_BOT_PREFIX') GUILD = os.getenv('DISCORD_GUILD') AIRVISUAL_KEY = os.getenv('AIRVISUAL_KEY') class Utility(commands.Cog): def", "# await client.add_role(ctx.message.author, role) @commands.command(name='ping', description='Ping latency', help='Ping latency', ignore_extra=True, hidden=False, enabled=True) async", "Fixed members command for intents 20200522 - Initial code Copyright 2020 <NAME> Licensed", "defaults to 3', ignore_extra=True, hidden=False, enabled=True) @commands.has_permissions(manage_messages=True) async def clear(self, ctx, amount=3): await", "ms\") @commands.command(name='server', description='Server information', help='Server information', ignore_extra=True, hidden=False, enabled=True) @commands.has_role('admins') async def server(self,", "enabled=True) async def members(self, ctx, showall=None): guild = discord.utils.get(self.bot.guilds, name=GUILD) members = ''", "ignore_extra=True, hidden=False, enabled=True) async def members(self, ctx, showall=None): guild = discord.utils.get(self.bot.guilds, name=GUILD) members", "# If get could find the role # await client.add_role(ctx.message.author, role) @commands.command(name='ping', 
description='Ping", "get could find the role # await client.add_role(ctx.message.author, role) @commands.command(name='ping', description='Ping latency', help='Ping", "v3.7.3 Description: Fun commands for everyone Changelog: 20210606 - Fixed members command for", "ip_abuse(self, ctx, ipAddress): ABUSEIPDB_KEY = os.getenv('ABUSEIPDB_KEY') url = 'https://api.abuseipdb.com/api/v2/check' querystring = { 'ipAddress':", "find the role # await client.add_role(ctx.message.author, role) @commands.command(name='ping', description='Ping latency', help='Ping latency', ignore_extra=True,", "See the License for the specific language governing permissions and limitations under the", "* 2.236936)} m/h from \" for key, val in wind_mapping.items(): if int(jsonResponse['data']['current']['weather']['wd']) in", "do a map to ranges and print out a human friendly # equivilant.", "@commands.command(name='members', description='Current Members', help='Current Members', ignore_extra=True, hidden=False, enabled=True) async def members(self, ctx, showall=None):", "304), 'NW': range(305, 326), 'NNW': range(327, 348)} data = f\"{jsonResponse['data']['city']}, {jsonResponse['data']['state']}, {jsonResponse['data']['country']}\\n\" data", "import requests from discord.ext import commands from dotenv import load_dotenv load_dotenv() BOT_PREFIX =", "member in guild.members: if str(member.id) == str(mention.lstrip('<@!').rstrip('>')): await ctx.channel.send(f\"Nickname: {member.nick}\\n\" f\"Discord Name: {member.name}#{member.discriminator}\\n\"", "if int(jsonResponse['data']['current']['weather']['wd']) in val: data += f\"{jsonResponse['data']['current']['weather']['wd']}° ({key})\\n\" await ctx.channel.send(data) else: await ctx.channel.send(f\"No", "'11': 'Thunderstorm', '13': 'Snow', '50': 'Mist'} aqius_mapping = {'Good': range(0, 50), 'Moderate': range(51,", "- \" for key, val in aqius_mapping.items(): if int(jsonResponse['data']['current']['pollution']['aqius']) in val: data +=", "load_dotenv load_dotenv() BOT_PREFIX = os.getenv('DISCORD_BOT_PREFIX') GUILD = os.getenv('DISCORD_GUILD') AIRVISUAL_KEY = os.getenv('AIRVISUAL_KEY') class Utility(commands.Cog):", "AQIUS then do a map to ranges and print out a human friendly", "Permissions: {member.guild_permissions}\\n\" f\"Top Role: {member.top_role}\\n\" f\"Roles: {str(', '.join([role.name for role in member.roles]).replace('@', ''))}\\n\")", "clear(self, ctx, amount=3): await ctx.channel.purge(limit=amount) @commands.command(name='ip_abuse', description='Get abuse score for IP', help='Get abuse", "to use underscores instead of spaces in city or state, i.e. 
Sante_Fe New_Mexico\")", "from dotenv import load_dotenv load_dotenv() BOT_PREFIX = os.getenv('DISCORD_BOT_PREFIX') GUILD = os.getenv('DISCORD_GUILD') AIRVISUAL_KEY =", "information', help='Server information', ignore_extra=True, hidden=False, enabled=True) @commands.has_role('admins') async def server(self, ctx): guild =", "+= f\"Pressure: {jsonResponse['data']['current']['weather']['pr']} hPa\\n\" data += f\"Humidity: {jsonResponse['data']['current']['weather']['hu']}%\\n\" data += f\"Wind: {int(jsonResponse['data']['current']['weather']['ws'] *", "params=querystring) ip_info = json.loads(response.text) data = '**Data provided by abuseipdb.com**\\n\\n' for k, v", "- {members}\") @commands.command(name='unban', description='Unban member from server', help='Unban member from server', ignore_extra=True, hidden=True,", "m/h from \" for key, val in wind_mapping.items(): if int(jsonResponse['data']['current']['weather']['wd']) in val: data", "ctx.guild.bans() member_name, member_discriminator = member.split('#') for ban_entry in banned_users: user = ban_entry.user if", "'.join([member.name for member in guild.members]) await ctx.channel.send(f\"Server Name: {guild.name} (ID: {guild.id})\\n\" f\"Server Owner:", "data found. Make sure to use underscores instead of spaces in city or", "everyone Changelog: 20210606 - Fixed members command for intents 20200522 - Initial code", "sure to use underscores instead of spaces in city or state, i.e. Sante_Fe", "this file except in compliance with the License. You may obtain a copy", "name='ROLE_NAME') # if role: # If get could find the role # await", "def weather(self, ctx, city, state): # This intended as a 'good enough' tool.", "range(147, 169), 'S': range(170, 191), 'SSW': range(192, 214), 'SW': range(215, 236), 'WSW': range(237,", "so we're just mapping the numerical part to the human friendly text version.", "\"License\"); you may not use this file except in compliance with the License.", "if int(jsonResponse['data']['current']['pollution']['aqius']) in val: data += f\"{key}\\n\" data += f\"Temperature: {int(((jsonResponse['data']['current']['weather']['tp']) * 9)", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "code Copyright 2020 <NAME> Licensed under the Apache License, Version 2.0 (the \"License\");", "Clouds', '09': 'Shower Rain', '10': 'Rain', '11': 'Thunderstorm', '13': 'Snow', '50': 'Mist'} aqius_mapping", "+= f\"Air Quality Index: {jsonResponse['data']['current']['pollution']['aqius']} - \" for key, val in aqius_mapping.items(): if", "you may not use this file except in compliance with the License. You", "intended as a 'good enough' tool. There are some accuracy issues # such", "/ 5) + 32}°F\\n\" data += f\"Pressure: {jsonResponse['data']['current']['weather']['pr']} hPa\\n\" data += f\"Humidity: {jsonResponse['data']['current']['weather']['hu']}%\\n\"", "ignore_extra=True, hidden=True, enabled=True) async def whoami(self, ctx): await ctx.channel.send(f\"User Name: {ctx.author.name}\\nUser ID: {ctx.author.id}\")", "agreed to in writing, software distributed under the License is distributed on an", "def ip_abuse(self, ctx, ipAddress): ABUSEIPDB_KEY = os.getenv('ABUSEIPDB_KEY') url = 'https://api.abuseipdb.com/api/v2/check' querystring = {", "or state, i.e. 
    @commands.command(name='server', description='Server information', help='Server information', ignore_extra=True, hidden=False, enabled=True)
    @commands.has_role('admins')
    async def server(self, ctx):
        guild = discord.utils.get(self.bot.guilds, name=GUILD)
        text_channels = '\n - '.join([channel.name for channel in guild.text_channels])
        voice_channels = '\n - '.join([channel.name for channel in guild.voice_channels])
        members = '\n - '.join([member.name for member in guild.members])
        await ctx.channel.send(f"Server Name: {guild.name} (ID: {guild.id})\n"
                               f"Server Owner: {guild.owner} (ID: {guild.owner_id})\n"
                               f"Server Description: {guild.description}\n"
                               f"Region: {guild.region}\n"
                               f"File Size Limit: {guild.filesize_limit} bytes\n\n"
                               f"**Text Channels:**\n - {text_channels}\n\n"
                               f"**Voice Channels:**\n - {voice_channels}\n\n"
                               f"**Server Members: {guild.member_count}**\n - {members}")
\"\"\" import discord import json import", "in wind_mapping.items(): if int(jsonResponse['data']['current']['weather']['wd']) in val: data += f\"{jsonResponse['data']['current']['weather']['wd']}° ({key})\\n\" await ctx.channel.send(data) else:", "range(57, 79), 'E': range(80, 101), 'ESE': range(102, 124), 'SE': range(125, 146), 'SSE': range(147,", "<NAME> <<EMAIL>> fun.py v0.1 Tested to Python v3.7.3 Description: Fun commands for everyone", "server', ignore_extra=True, hidden=True, enabled=False) async def unban(self, ctx, *, member): banned_users = await", "{member.activity.name}\\n\" f\"Guild: {member.guild}\\n\" f\"Guild Permissions: {member.guild_permissions}\\n\" f\"Top Role: {member.top_role}\\n\" f\"Roles: {str(', '.join([role.name for", "f\"- {member.display_name}\\n\" await ctx.channel.send(f\"**Server Members: {guild.member_count}**\\n{members.replace('@', '')}\") @commands.command(name='nickname', description='Change nickname', help='Change nickname', ignore_extra=True,", "banned_users: user = ban_entry.user if (user.name, user.discriminator) == (member_name, member_discriminator): await ctx.guild.unban(user) await", "wind_mapping.items(): if int(jsonResponse['data']['current']['weather']['wd']) in val: data += f\"{jsonResponse['data']['current']['weather']['wd']}° ({key})\\n\" await ctx.channel.send(data) else: await", "Utility Cog loaded') @commands.command(name='ban', description='Ban member from server', help='Ban member from server', ignore_extra=True,", "20200522 - Initial code Copyright 2020 <NAME> Licensed under the Apache License, Version", "range(349, 360), 'N': range(0, 11), 'NNE': range(12, 34), 'NE': range(35, 56), 'ENE': range(57,", "issues # such as conversion of C to F temps and wind_mapping where", "member.hasPermission('KICK_MEMBERS'): await member.kick(reason=reason) await ctx.send(f\"Kicked {member.mention}\") @commands.command(name='member', description='Member information', help='Member information', ignore_extra=True, hidden=False,", "348)} data = f\"{jsonResponse['data']['city']}, {jsonResponse['data']['state']}, {jsonResponse['data']['country']}\\n\" data += f\"{jsonResponse['data']['location']['coordinates']}\\n\" # ic comes through", "for Sensitive Groups': range(101, 150), 'Unhealthy': range(151, 200), 'Very Unhealthy': range(201, 300), 'Hazardous':", "for key, val in aqius_mapping.items(): if int(jsonResponse['data']['current']['pollution']['aqius']) in val: data += f\"{key}\\n\" data", "help='Weather as !weather Santa_Fe New_Mexico', ignore_extra=True, hidden=True, enabled=True) async def weather(self, ctx, city,", "commands from dotenv import load_dotenv load_dotenv() BOT_PREFIX = os.getenv('DISCORD_BOT_PREFIX') GUILD = os.getenv('DISCORD_GUILD') AIRVISUAL_KEY", "ignore_extra=True, hidden=True, enabled=True) async def weather(self, ctx, city, state): # This intended as", "f\"**Text Channels:**\\n - {text_channels}\\n\\n\" f\"**Voice Channels:**\\n - {voice_channels}\\n\\n\" f\"**Server Members: {guild.member_count}**\\n - {members}\")", "'good enough' tool. 
There are some accuracy issues # such as conversion of", "'Moderate': range(51, 100), 'Unhealthy for Sensitive Groups': range(101, 150), 'Unhealthy': range(151, 200), 'Very", "python3 \"\"\" Bishbot - https://github.com/ldgregory/bishbot <NAME> <<EMAIL>> fun.py v0.1 Tested to Python v3.7.3", "ABUSEIPDB_KEY = os.getenv('ABUSEIPDB_KEY') url = 'https://api.abuseipdb.com/api/v2/check' querystring = { 'ipAddress': ipAddress, 'maxAgeInDays': '90'", "'W': range(260, 281), 'WNW': range(282, 304), 'NW': range(305, 326), 'NNW': range(327, 348)} data", "map to ranges and print out a human friendly # equivilant. data +=", "required by applicable law or agreed to in writing, software distributed under the", "ban(self, ctx, member: discord.member, *, reason=None): if member.hasPermission('BAN_MEMBERS'): await member.ban(reason=reason) await ctx.send(f\"Banned {member.mention}\")", "x messages, defaults to 3', help='Clear x messages, defaults to 3', ignore_extra=True, hidden=False,", "data = f\"{jsonResponse['data']['city']}, {jsonResponse['data']['state']}, {jsonResponse['data']['country']}\\n\" data += f\"{jsonResponse['data']['location']['coordinates']}\\n\" # ic comes through like", "Unhealthy': range(201, 300), 'Hazardous': range(301, 500)} wind_mapping = {'North': range(349, 360), 'N': range(0,", "ctx, mention): guild = discord.utils.get(self.bot.guilds, name=GUILD) for member in guild.members: if str(member.id) ==", "Members', ignore_extra=True, hidden=False, enabled=True) async def members(self, ctx, showall=None): guild = discord.utils.get(self.bot.guilds, name=GUILD)", "= os.getenv('ABUSEIPDB_KEY') url = 'https://api.abuseipdb.com/api/v2/check' querystring = { 'ipAddress': ipAddress, 'maxAgeInDays': '90' }", "{guild.description}\\n\" f\"Region: {guild.region}\\n\" f\"File Size Limit: {guild.filesize_limit} bytes\\n\\n\" f\"**Text Channels:**\\n - {text_channels}\\n\\n\" f\"**Voice", "ctx.channel.send(f\"Server Name: {guild.name} (ID: {guild.id})\\n\" f\"Server Owner: {guild.owner} (ID: {guild.owner_id})\\n\" f\"Server Description: {guild.description}\\n\"", "await member.kick(reason=reason) await ctx.send(f\"Kicked {member.mention}\") @commands.command(name='member', description='Member information', help='Member information', ignore_extra=True, hidden=False, enabled=True)", "from server', ignore_extra=True, hidden=True, enabled=False) async def ban(self, ctx, member: discord.member, *, reason=None):", "member_name, member_discriminator = member.split('#') for ban_entry in banned_users: user = ban_entry.user if (user.name,", "ranges and print out a human friendly # equivilant. data += f\"Air Quality", "member.roles]).replace('@', ''))}\\n\") @commands.command(name='members', description='Current Members', help='Current Members', ignore_extra=True, hidden=False, enabled=True) async def members(self,", "intents 20200522 - Initial code Copyright 2020 <NAME> Licensed under the Apache License,", "member.kick(reason=reason) await ctx.send(f\"Kicked {member.mention}\") @commands.command(name='member', description='Member information', help='Member information', ignore_extra=True, hidden=False, enabled=True) async", "to F temps and wind_mapping where NNE is # actually 11.25 - 33.75", "a 'good enough' tool. 
There are some accuracy issues # such as conversion", "def unban(self, ctx, *, member): banned_users = await ctx.guild.bans() member_name, member_discriminator = member.split('#')", "= requests.request(method='GET', url=url) jsonResponse = json.loads(response.text) if jsonResponse['status'] == 'success': weather_mapping = {'01':", "range(151, 200), 'Very Unhealthy': range(201, 300), 'Hazardous': range(301, 500)} wind_mapping = {'North': range(349,", "= await ctx.guild.bans() member_name, member_discriminator = member.split('#') for ban_entry in banned_users: user =", "= '' for member in guild.members: if showall == 'showall': members += f\"-", "conversion of C to F temps and wind_mapping where NNE is # actually", "If get could find the role # await client.add_role(ctx.message.author, role) @commands.command(name='ping', description='Ping latency',", "C to F temps and wind_mapping where NNE is # actually 11.25 -", "about you', ignore_extra=True, hidden=True, enabled=True) async def whoami(self, ctx): await ctx.channel.send(f\"User Name: {ctx.author.name}\\nUser", "@commands.Cog.listener() async def on_ready(self): print('- Utility Cog loaded') @commands.command(name='ban', description='Ban member from server',", "wind_mapping = {'North': range(349, 360), 'N': range(0, 11), 'NNE': range(12, 34), 'NE': range(35,", "hidden=False, enabled=True) @commands.has_permissions(manage_messages=True) async def clear(self, ctx, amount=3): await ctx.channel.purge(limit=amount) @commands.command(name='ip_abuse', description='Get abuse", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "+= f\"- {member.display_name} : \" members += f\"{member.name} (\" members += ', '.join([role.name", "for role in member.roles]) + \")\\n\" else: members += f\"- {member.display_name}\\n\" await ctx.channel.send(f\"**Server", "range(327, 348)} data = f\"{jsonResponse['data']['city']}, {jsonResponse['data']['state']}, {jsonResponse['data']['country']}\\n\" data += f\"{jsonResponse['data']['location']['coordinates']}\\n\" # ic comes", "not use this file except in compliance with the License. 
You may obtain", "Fun commands for everyone Changelog: 20210606 - Fixed members command for intents 20200522", "Make sure to use underscores instead of spaces in city or state, i.e.", "@commands.command(name='member', description='Member information', help='Member information', ignore_extra=True, hidden=False, enabled=True) async def member(self, ctx, mention):", "print('- Utility Cog loaded') @commands.command(name='ban', description='Ban member from server', help='Ban member from server',", "@commands.command(name='ip_abuse', description='Get abuse score for IP', help='Get abuse score for IP', ignore_extra=True, hidden=False,", "*, reason=None): if member.hasPermission('KICK_MEMBERS'): await member.kick(reason=reason) await ctx.send(f\"Kicked {member.mention}\") @commands.command(name='member', description='Member information', help='Member", "{members}\") @commands.command(name='unban', description='Unban member from server', help='Unban member from server', ignore_extra=True, hidden=True, enabled=False)", "key, val in aqius_mapping.items(): if int(jsonResponse['data']['current']['pollution']['aqius']) in val: data += f\"{key}\\n\" data +=", "f\"Roles: {str(', '.join([role.name for role in member.roles]).replace('@', ''))}\\n\") @commands.command(name='members', description='Current Members', help='Current Members',", "hidden=True, enabled=False) async def kick(self, ctx, member: discord.member, *, reason=None): if member.hasPermission('KICK_MEMBERS'): await", "146), 'SSE': range(147, 169), 'S': range(170, 191), 'SSW': range(192, 214), 'SW': range(215, 236),", "and limitations under the License. \"\"\" import discord import json import os import", "+= f\"{k}: {v}\\n\" await ctx.channel.send(data) @commands.command(name='kick', description='Kick member off server', help='Kick member off", "async def whoami(self, ctx): await ctx.channel.send(f\"User Name: {ctx.author.name}\\nUser ID: {ctx.author.id}\") def setup(bot): bot.add_cog(Utility(bot))", "f\"Wind: {int(jsonResponse['data']['current']['weather']['ws'] * 2.236936)} m/h from \" for key, val in wind_mapping.items(): if", "text_channels = '\\n - '.join([channel.name for channel in guild.text_channels]) voice_channels = '\\n -", "description='Info about you', help='Info about you', ignore_extra=True, hidden=True, enabled=True) async def whoami(self, ctx):", "= { 'ipAddress': ipAddress, 'maxAgeInDays': '90' } headers = { 'Accept': 'application/json', 'Key':", "{member.mention}\") @commands.command(name='clear', description='Clear x messages, defaults to 3', help='Clear x messages, defaults to", "79), 'E': range(80, 101), 'ESE': range(102, 124), 'SE': range(125, 146), 'SSE': range(147, 169),", "get(ctx.message.server.roles, name='ROLE_NAME') # if role: # If get could find the role #", "NNE is # actually 11.25 - 33.75 degrees vs the ints required by", "friendly # equivilant. 
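    # Usage sketch (assuming the command were enabled): the argument is the
    # full tag, e.g. !unban SomeUser#1234. Discord usernames cannot contain
    # '#', so split('#') cleanly separates the name from the discriminator.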
data += f\"Air Quality Index: {jsonResponse['data']['current']['pollution']['aqius']} - \" for key,", "281), 'WNW': range(282, 304), 'NW': range(305, 326), 'NNW': range(327, 348)} data = f\"{jsonResponse['data']['city']},", "* 1000)} ms\") @commands.command(name='server', description='Server information', help='Server information', ignore_extra=True, hidden=False, enabled=True) @commands.has_role('admins') async", "url=url) jsonResponse = json.loads(response.text) if jsonResponse['status'] == 'success': weather_mapping = {'01': 'Clear Sky',", "AIRVISUAL_KEY = os.getenv('AIRVISUAL_KEY') class Utility(commands.Cog): def __init__(self, bot): self.bot = bot @commands.Cog.listener() async", "help='Kick member off server', ignore_extra=True, hidden=True, enabled=False) async def kick(self, ctx, member: discord.member,", "str(member.id) == str(mention.lstrip('<@!').rstrip('>')): await ctx.channel.send(f\"Nickname: {member.nick}\\n\" f\"Discord Name: {member.name}#{member.discriminator}\\n\" f\"Discord ID: {member.id}\\n\" f\"Joined:", "{jsonResponse['data']['current']['weather']['hu']}%\\n\" data += f\"Wind: {int(jsonResponse['data']['current']['weather']['ws'] * 2.236936)} m/h from \" for key, val", "f\"Joined: {member.joined_at}\\n\" f\"Status: {member.status}\\n\" # f\"Is on Mobile: {member.is_on_mobile}\\n\" # f\"Activity: {str(member.activity.type).lstrip('ActivityType.')} {member.activity.name}\\n\"", "ANY KIND, either express or implied. See the License for the specific language", "2020 <NAME> Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "/usr/bin/env python3 \"\"\" Bishbot - https://github.com/ldgregory/bishbot <NAME> <<EMAIL>> fun.py v0.1 Tested to Python", "== str(mention.lstrip('<@!').rstrip('>')): await ctx.channel.send(f\"Nickname: {member.nick}\\n\" f\"Discord Name: {member.name}#{member.discriminator}\\n\" f\"Discord ID: {member.id}\\n\" f\"Joined: {member.joined_at}\\n\"", "{guild.member_count}**\\n{members.replace('@', '')}\") @commands.command(name='nickname', description='Change nickname', help='Change nickname', ignore_extra=True, hidden=False, enabled=True) async def nickname(self,", "range(35, 56), 'ENE': range(57, 79), 'E': range(80, 101), 'ESE': range(102, 124), 'SE': range(125,", "{member.display_name} : \" members += f\"{member.name} (\" members += ', '.join([role.name for role", "== (member_name, member_discriminator): await ctx.guild.unban(user) await ctx.send(f\"Unbanned {user.mention}\") return @commands.command(name='weather', description='Weather as !weather", "+= f\"{jsonResponse['data']['location']['coordinates']}\\n\" # ic comes through like 01d or 01n to differentiate day", "file except in compliance with the License. 
You may obtain a copy of", "56), 'ENE': range(57, 79), 'E': range(80, 101), 'ESE': range(102, 124), 'SE': range(125, 146),", "description='Kick member off server', help='Kick member off server', ignore_extra=True, hidden=True, enabled=False) async def", "+= f\"{member.name} (\" members += ', '.join([role.name for role in member.roles]) + \")\\n\"", "'SE': range(125, 146), 'SSE': range(147, 169), 'S': range(170, 191), 'SSW': range(192, 214), 'SW':", "<<EMAIL>> fun.py v0.1 Tested to Python v3.7.3 Description: Fun commands for everyone Changelog:", "discord.member, *, reason=None): if member.hasPermission('KICK_MEMBERS'): await member.kick(reason=reason) await ctx.send(f\"Kicked {member.mention}\") @commands.command(name='member', description='Member information',", "enabled=True) async def whoami(self, ctx): await ctx.channel.send(f\"User Name: {ctx.author.name}\\nUser ID: {ctx.author.id}\") def setup(bot):", "for member in guild.members]) await ctx.channel.send(f\"Server Name: {guild.name} (ID: {guild.id})\\n\" f\"Server Owner: {guild.owner}", "version. data += f\"{weather_mapping[jsonResponse['data']['current']['weather']['ic'][:-1]]}\\n\" # Print out the AQIUS then do a map", "data += f\"Air Quality Index: {jsonResponse['data']['current']['pollution']['aqius']} - \" for key, val in aqius_mapping.items():", "range(305, 326), 'NNW': range(327, 348)} data = f\"{jsonResponse['data']['city']}, {jsonResponse['data']['state']}, {jsonResponse['data']['country']}\\n\" data += f\"{jsonResponse['data']['location']['coordinates']}\\n\"", "val in aqius_mapping.items(): if int(jsonResponse['data']['current']['pollution']['aqius']) in val: data += f\"{key}\\n\" data += f\"Temperature:", "'SSE': range(147, 169), 'S': range(170, 191), 'SSW': range(192, 214), 'SW': range(215, 236), 'WSW':", "'%20')}&country=USA&key={AIRVISUAL_KEY}\" response = requests.request(method='GET', url=url) jsonResponse = json.loads(response.text) if jsonResponse['status'] == 'success': weather_mapping", "2.0 (the \"License\"); you may not use this file except in compliance with", "ctx.channel.send(f\"No data found. 
Make sure to use underscores instead of spaces in city", "fun.py v0.1 Tested to Python v3.7.3 Description: Fun commands for everyone Changelog: 20210606", "'application/json', 'Key': ABUSEIPDB_KEY } response = requests.request(method='GET', url=url, headers=headers, params=querystring) ip_info = json.loads(response.text)", "'Broken Clouds', '09': 'Shower Rain', '10': 'Rain', '11': 'Thunderstorm', '13': 'Snow', '50': 'Mist'}", "member.hasPermission('BAN_MEMBERS'): await member.ban(reason=reason) await ctx.send(f\"Banned {member.mention}\") @commands.command(name='clear', description='Clear x messages, defaults to 3',", "hidden=True, enabled=False) async def ban(self, ctx, member: discord.member, *, reason=None): if member.hasPermission('BAN_MEMBERS'): await", "os import requests from discord.ext import commands from dotenv import load_dotenv load_dotenv() BOT_PREFIX", ": \" members += f\"{member.name} (\" members += ', '.join([role.name for role in", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "k, v in ip_info['data'].items(): data += f\"{k}: {v}\\n\" await ctx.channel.send(data) @commands.command(name='kick', description='Kick member", "+= ', '.join([role.name for role in member.roles]) + \")\\n\" else: members += f\"-", "@commands.command(name='weather', description='Weather as !weather CITY STATE', help='Weather as !weather Santa_Fe New_Mexico', ignore_extra=True, hidden=True,", "3', ignore_extra=True, hidden=False, enabled=True) @commands.has_permissions(manage_messages=True) async def clear(self, ctx, amount=3): await ctx.channel.purge(limit=amount) @commands.command(name='ip_abuse',", "Members', help='Current Members', ignore_extra=True, hidden=False, enabled=True) async def members(self, ctx, showall=None): guild =", "data += f\"{weather_mapping[jsonResponse['data']['current']['weather']['ic'][:-1]]}\\n\" # Print out the AQIUS then do a map to", "the License. \"\"\" import discord import json import os import requests from discord.ext", "- '.join([channel.name for channel in guild.text_channels]) voice_channels = '\\n - '.join([channel.name for channel", "214), 'SW': range(215, 236), 'WSW': range(237, 259), 'W': range(260, 281), 'WNW': range(282, 304),", "the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "'Very Unhealthy': range(201, 300), 'Hazardous': range(301, 500)} wind_mapping = {'North': range(349, 360), 'N':", "data += f\"Pressure: {jsonResponse['data']['current']['weather']['pr']} hPa\\n\" data += f\"Humidity: {jsonResponse['data']['current']['weather']['hu']}%\\n\" data += f\"Wind: {int(jsonResponse['data']['current']['weather']['ws']", "f\"Server Description: {guild.description}\\n\" f\"Region: {guild.region}\\n\" f\"File Size Limit: {guild.filesize_limit} bytes\\n\\n\" f\"**Text Channels:**\\n -", "guild = discord.utils.get(self.bot.guilds, name=GUILD) for member in guild.members: if str(member.id) == str(mention.lstrip('<@!').rstrip('>')): await", "236), 'WSW': range(237, 259), 'W': range(260, 281), 'WNW': range(282, 304), 'NW': range(305, 326),", "role) @commands.command(name='ping', description='Ping latency', help='Ping latency', ignore_extra=True, hidden=False, enabled=True) async def ping(self, ctx):", "(the \"License\"); you may not use this file except in compliance with the", "- {voice_channels}\\n\\n\" f\"**Server Members: {guild.member_count}**\\n - {members}\") @commands.command(name='unban', description='Unban member from server', help='Unban", "= discord.utils.get(self.bot.guilds, name=GUILD) members = '' for member in guild.members: if showall ==", "'ESE': range(102, 124), 'SE': range(125, 146), 'SSE': range(147, 169), 'S': range(170, 191), 'SSW':", "from server', help='Unban member from server', ignore_extra=True, hidden=True, enabled=False) async def unban(self, ctx,", "enabled=False) async def ban(self, ctx, member: discord.member, *, reason=None): if member.hasPermission('BAN_MEMBERS'): await member.ban(reason=reason)", "description='Clear x messages, defaults to 3', help='Clear x messages, defaults to 3', ignore_extra=True,", "friendly text version. data += f\"{weather_mapping[jsonResponse['data']['current']['weather']['ic'][:-1]]}\\n\" # Print out the AQIUS then do", "could find the role # await client.add_role(ctx.message.author, role) @commands.command(name='ping', description='Ping latency', help='Ping latency',", "= os.getenv('DISCORD_BOT_PREFIX') GUILD = os.getenv('DISCORD_GUILD') AIRVISUAL_KEY = os.getenv('AIRVISUAL_KEY') class Utility(commands.Cog): def __init__(self, bot):", "# Print out the AQIUS then do a map to ranges and print", "*, member): banned_users = await ctx.guild.bans() member_name, member_discriminator = member.split('#') for ban_entry in", "and wind_mapping where NNE is # actually 11.25 - 33.75 degrees vs the", "the numerical part to the human friendly text version. 
data += f\"{weather_mapping[jsonResponse['data']['current']['weather']['ic'][:-1]]}\\n\" #", "async def server(self, ctx): guild = discord.utils.get(self.bot.guilds, name=GUILD) text_channels = '\\n - '.join([channel.name", "11), 'NNE': range(12, 34), 'NE': range(35, 56), 'ENE': range(57, 79), 'E': range(80, 101),", "ctx, member: discord.member, *, reason=None): if member.hasPermission('BAN_MEMBERS'): await member.ban(reason=reason) await ctx.send(f\"Banned {member.mention}\") @commands.command(name='clear',", "STATE', help='Weather as !weather Santa_Fe New_Mexico', ignore_extra=True, hidden=True, enabled=True) async def weather(self, ctx,", "voice_channels = '\\n - '.join([channel.name for channel in guild.voice_channels]) members = '\\n -", "kick(self, ctx, member: discord.member, *, reason=None): if member.hasPermission('KICK_MEMBERS'): await member.kick(reason=reason) await ctx.send(f\"Kicked {member.mention}\")", "data += f\"{jsonResponse['data']['current']['weather']['wd']}° ({key})\\n\" await ctx.channel.send(data) else: await ctx.channel.send(f\"No data found. Make sure", "enough' tool. There are some accuracy issues # such as conversion of C", "member from server', ignore_extra=True, hidden=True, enabled=False) async def ban(self, ctx, member: discord.member, *,", "permissions and limitations under the License. \"\"\" import discord import json import os", "val: data += f\"{key}\\n\" data += f\"Temperature: {int(((jsonResponse['data']['current']['weather']['tp']) * 9) / 5) +", "member off server', help='Kick member off server', ignore_extra=True, hidden=True, enabled=False) async def kick(self,", "off server', help='Kick member off server', ignore_extra=True, hidden=True, enabled=False) async def kick(self, ctx,", "(member_name, member_discriminator): await ctx.guild.unban(user) await ctx.send(f\"Unbanned {user.mention}\") return @commands.command(name='weather', description='Weather as !weather CITY", "state, i.e. Sante_Fe New_Mexico\") @commands.command(name='whoami', description='Info about you', help='Info about you', ignore_extra=True, hidden=True,", "from \" for key, val in wind_mapping.items(): if int(jsonResponse['data']['current']['weather']['wd']) in val: data +=", "'Clear Sky', '02': 'Few Clouds', '03': 'Scattered Clouds', '04': 'Broken Clouds', '09': 'Shower", "range(101, 150), 'Unhealthy': range(151, 200), 'Very Unhealthy': range(201, 300), 'Hazardous': range(301, 500)} wind_mapping", "hidden=False, enabled=True) async def ping(self, ctx): await ctx.channel.send(f\"Pong... 
{round(self.bot.latency * 1000)} ms\") @commands.command(name='server',", "f\"{key}\\n\" data += f\"Temperature: {int(((jsonResponse['data']['current']['weather']['tp']) * 9) / 5) + 32}°F\\n\" data +=", "loaded') @commands.command(name='ban', description='Ban member from server', help='Ban member from server', ignore_extra=True, hidden=True, enabled=False)", "hidden=False, enabled=True) @commands.has_role('admins') async def server(self, ctx): guild = discord.utils.get(self.bot.guilds, name=GUILD) text_channels =", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed", "enabled=False) async def unban(self, ctx, *, member): banned_users = await ctx.guild.bans() member_name, member_discriminator", "member.ban(reason=reason) await ctx.send(f\"Banned {member.mention}\") @commands.command(name='clear', description='Clear x messages, defaults to 3', help='Clear x", "{guild.name} (ID: {guild.id})\\n\" f\"Server Owner: {guild.owner} (ID: {guild.owner_id})\\n\" f\"Server Description: {guild.description}\\n\" f\"Region: {guild.region}\\n\"", "text version. data += f\"{weather_mapping[jsonResponse['data']['current']['weather']['ic'][:-1]]}\\n\" # Print out the AQIUS then do a", "member in guild.members: if showall == 'showall': members += f\"- {member.display_name} : \"", "about you', help='Info about you', ignore_extra=True, hidden=True, enabled=True) async def whoami(self, ctx): await", "found. Make sure to use underscores instead of spaces in city or state,", "off server', ignore_extra=True, hidden=True, enabled=False) async def kick(self, ctx, member: discord.member, *, reason=None):", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "\" members += f\"{member.name} (\" members += ', '.join([role.name for role in member.roles])", "<NAME> Licensed under the Apache License, Version 2.0 (the \"License\"); you may not", "nickname): await ctx.author.edit(nick=f\"{nickname}\") # role = get(ctx.message.server.roles, name='ROLE_NAME') # if role: # If", "data += f\"Temperature: {int(((jsonResponse['data']['current']['weather']['tp']) * 9) / 5) + 32}°F\\n\" data += f\"Pressure:", "f\"Guild Permissions: {member.guild_permissions}\\n\" f\"Top Role: {member.top_role}\\n\" f\"Roles: {str(', '.join([role.name for role in member.roles]).replace('@',", "await ctx.channel.send(f\"No data found. Make sure to use underscores instead of spaces in", "instead of spaces in city or state, i.e. Sante_Fe New_Mexico\") @commands.command(name='whoami', description='Info about", "name=GUILD) for member in guild.members: if str(member.id) == str(mention.lstrip('<@!').rstrip('>')): await ctx.channel.send(f\"Nickname: {member.nick}\\n\" f\"Discord", "member_discriminator): await ctx.guild.unban(user) await ctx.send(f\"Unbanned {user.mention}\") return @commands.command(name='weather', description='Weather as !weather CITY STATE',", "banned_users = await ctx.guild.bans() member_name, member_discriminator = member.split('#') for ban_entry in banned_users: user", "+= f\"- {member.display_name}\\n\" await ctx.channel.send(f\"**Server Members: {guild.member_count}**\\n{members.replace('@', '')}\") @commands.command(name='nickname', description='Change nickname', help='Change nickname',", "2.236936)} m/h from \" for key, val in wind_mapping.items(): if int(jsonResponse['data']['current']['weather']['wd']) in val:", "ping(self, ctx): await ctx.channel.send(f\"Pong... 
{round(self.bot.latency * 1000)} ms\") @commands.command(name='server', description='Server information', help='Server information',", "messages, defaults to 3', help='Clear x messages, defaults to 3', ignore_extra=True, hidden=False, enabled=True)", "data += f\"{jsonResponse['data']['location']['coordinates']}\\n\" # ic comes through like 01d or 01n to differentiate", "in val: data += f\"{key}\\n\" data += f\"Temperature: {int(((jsonResponse['data']['current']['weather']['tp']) * 9) / 5)", "law or agreed to in writing, software distributed under the License is distributed", "{v}\\n\" await ctx.channel.send(data) @commands.command(name='kick', description='Kick member off server', help='Kick member off server', ignore_extra=True,", "members = '' for member in guild.members: if showall == 'showall': members +=", "to 3', help='Clear x messages, defaults to 3', ignore_extra=True, hidden=False, enabled=True) @commands.has_permissions(manage_messages=True) async", "v in ip_info['data'].items(): data += f\"{k}: {v}\\n\" await ctx.channel.send(data) @commands.command(name='kick', description='Kick member off", "{str(', '.join([role.name for role in member.roles]).replace('@', ''))}\\n\") @commands.command(name='members', description='Current Members', help='Current Members', ignore_extra=True,", "191), 'SSW': range(192, 214), 'SW': range(215, 236), 'WSW': range(237, 259), 'W': range(260, 281),", "specific language governing permissions and limitations under the License. \"\"\" import discord import", "range(80, 101), 'ESE': range(102, 124), 'SE': range(125, 146), 'SSE': range(147, 169), 'S': range(170,", "Version 2.0 (the \"License\"); you may not use this file except in compliance", "to ranges and print out a human friendly # equivilant. data += f\"Air", "the Apache License, Version 2.0 (the \"License\"); you may not use this file", "f\"Is on Mobile: {member.is_on_mobile}\\n\" # f\"Activity: {str(member.activity.type).lstrip('ActivityType.')} {member.activity.name}\\n\" f\"Guild: {member.guild}\\n\" f\"Guild Permissions: {member.guild_permissions}\\n\"", "to Python v3.7.3 Description: Fun commands for everyone Changelog: 20210606 - Fixed members", "def member(self, ctx, mention): guild = discord.utils.get(self.bot.guilds, name=GUILD) for member in guild.members: if", "'ipAddress': ipAddress, 'maxAgeInDays': '90' } headers = { 'Accept': 'application/json', 'Key': ABUSEIPDB_KEY }", "f\"Discord Name: {member.name}#{member.discriminator}\\n\" f\"Discord ID: {member.id}\\n\" f\"Joined: {member.joined_at}\\n\" f\"Status: {member.status}\\n\" # f\"Is on", "required by range(). 
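            # A gap-free alternative to wind_mapping (a sketch, not used here):
            # each of the 16 compass points spans 22.5 degrees, so the label
            # can be computed arithmetically from the bearing wd:
            #   points = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
            #             'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
            #   label = points[int((wd % 360) / 22.5 + 0.5) % 16]
            # e.g. wd = 30 gives int(30 / 22.5 + 0.5) = 1, i.e. 'NNE'.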
url = f\"https://api.airvisual.com/v2/city?city={city.replace('_', '%20')}&state={state.replace('_', '%20')}&country=USA&key={AIRVISUAL_KEY}\" response = requests.request(method='GET', url=url) jsonResponse", "range(282, 304), 'NW': range(305, 326), 'NNW': range(327, 348)} data = f\"{jsonResponse['data']['city']}, {jsonResponse['data']['state']}, {jsonResponse['data']['country']}\\n\"", "= '**Data provided by abuseipdb.com**\\n\\n' for k, v in ip_info['data'].items(): data += f\"{k}:", "{jsonResponse['data']['current']['weather']['pr']} hPa\\n\" data += f\"Humidity: {jsonResponse['data']['current']['weather']['hu']}%\\n\" data += f\"Wind: {int(jsonResponse['data']['current']['weather']['ws'] * 2.236936)} m/h", "members += f\"- {member.display_name}\\n\" await ctx.channel.send(f\"**Server Members: {guild.member_count}**\\n{members.replace('@', '')}\") @commands.command(name='nickname', description='Change nickname', help='Change", "member: discord.member, *, reason=None): if member.hasPermission('KICK_MEMBERS'): await member.kick(reason=reason) await ctx.send(f\"Kicked {member.mention}\") @commands.command(name='member', description='Member", "'03': 'Scattered Clouds', '04': 'Broken Clouds', '09': 'Shower Rain', '10': 'Rain', '11': 'Thunderstorm',", "ctx, ipAddress): ABUSEIPDB_KEY = os.getenv('ABUSEIPDB_KEY') url = 'https://api.abuseipdb.com/api/v2/check' querystring = { 'ipAddress': ipAddress,", "await ctx.author.edit(nick=f\"{nickname}\") # role = get(ctx.message.server.roles, name='ROLE_NAME') # if role: # If get", "f\"**Voice Channels:**\\n - {voice_channels}\\n\\n\" f\"**Server Members: {guild.member_count}**\\n - {members}\") @commands.command(name='unban', description='Unban member from", "!weather Santa_Fe New_Mexico', ignore_extra=True, hidden=True, enabled=True) async def weather(self, ctx, city, state): #", "Size Limit: {guild.filesize_limit} bytes\\n\\n\" f\"**Text Channels:**\\n - {text_channels}\\n\\n\" f\"**Voice Channels:**\\n - {voice_channels}\\n\\n\" f\"**Server", "async def ban(self, ctx, member: discord.member, *, reason=None): if member.hasPermission('BAN_MEMBERS'): await member.ban(reason=reason) await", "under the Apache License, Version 2.0 (the \"License\"); you may not use this", "in val: data += f\"{jsonResponse['data']['current']['weather']['wd']}° ({key})\\n\" await ctx.channel.send(data) else: await ctx.channel.send(f\"No data found.", "f\"{jsonResponse['data']['city']}, {jsonResponse['data']['state']}, {jsonResponse['data']['country']}\\n\" data += f\"{jsonResponse['data']['location']['coordinates']}\\n\" # ic comes through like 01d or", "+= f\"{jsonResponse['data']['current']['weather']['wd']}° ({key})\\n\" await ctx.channel.send(data) else: await ctx.channel.send(f\"No data found. Make sure to", "either express or implied. See the License for the specific language governing permissions", "@commands.command(name='ban', description='Ban member from server', help='Ban member from server', ignore_extra=True, hidden=True, enabled=False) async", "city, state): # This intended as a 'good enough' tool. 
There are some", "Print out the AQIUS then do a map to ranges and print out", "key, val in wind_mapping.items(): if int(jsonResponse['data']['current']['weather']['wd']) in val: data += f\"{jsonResponse['data']['current']['weather']['wd']}° ({key})\\n\" await", "data += f\"{key}\\n\" data += f\"Temperature: {int(((jsonResponse['data']['current']['weather']['tp']) * 9) / 5) + 32}°F\\n\"", "# f\"Is on Mobile: {member.is_on_mobile}\\n\" # f\"Activity: {str(member.activity.type).lstrip('ActivityType.')} {member.activity.name}\\n\" f\"Guild: {member.guild}\\n\" f\"Guild Permissions:", "async def ip_abuse(self, ctx, ipAddress): ABUSEIPDB_KEY = os.getenv('ABUSEIPDB_KEY') url = 'https://api.abuseipdb.com/api/v2/check' querystring =", "data += f\"Wind: {int(jsonResponse['data']['current']['weather']['ws'] * 2.236936)} m/h from \" for key, val in", "'04': 'Broken Clouds', '09': 'Shower Rain', '10': 'Rain', '11': 'Thunderstorm', '13': 'Snow', '50':", "hidden=False, enabled=True) async def ip_abuse(self, ctx, ipAddress): ABUSEIPDB_KEY = os.getenv('ABUSEIPDB_KEY') url = 'https://api.abuseipdb.com/api/v2/check'", "data = '**Data provided by abuseipdb.com**\\n\\n' for k, v in ip_info['data'].items(): data +=", "tool. There are some accuracy issues # such as conversion of C to", "Apache License, Version 2.0 (the \"License\"); you may not use this file except", "or implied. See the License for the specific language governing permissions and limitations", "'10': 'Rain', '11': 'Thunderstorm', '13': 'Snow', '50': 'Mist'} aqius_mapping = {'Good': range(0, 50),", "} headers = { 'Accept': 'application/json', 'Key': ABUSEIPDB_KEY } response = requests.request(method='GET', url=url,", "'showall': members += f\"- {member.display_name} : \" members += f\"{member.name} (\" members +=", "client.add_role(ctx.message.author, role) @commands.command(name='ping', description='Ping latency', help='Ping latency', ignore_extra=True, hidden=False, enabled=True) async def ping(self,", "response = requests.request(method='GET', url=url) jsonResponse = json.loads(response.text) if jsonResponse['status'] == 'success': weather_mapping =", "+ 32}°F\\n\" data += f\"Pressure: {jsonResponse['data']['current']['weather']['pr']} hPa\\n\" data += f\"Humidity: {jsonResponse['data']['current']['weather']['hu']}%\\n\" data +=", "enabled=True) async def member(self, ctx, mention): guild = discord.utils.get(self.bot.guilds, name=GUILD) for member in", "@commands.command(name='clear', description='Clear x messages, defaults to 3', help='Clear x messages, defaults to 3',", "for IP', ignore_extra=True, hidden=False, enabled=True) async def ip_abuse(self, ctx, ipAddress): ABUSEIPDB_KEY = os.getenv('ABUSEIPDB_KEY')", "{jsonResponse['data']['current']['pollution']['aqius']} - \" for key, val in aqius_mapping.items(): if int(jsonResponse['data']['current']['pollution']['aqius']) in val: data", "CITY STATE', help='Weather as !weather Santa_Fe New_Mexico', ignore_extra=True, hidden=True, enabled=True) async def weather(self,", "{'01': 'Clear Sky', '02': 'Few Clouds', '03': 'Scattered Clouds', '04': 'Broken Clouds', '09':", "'13': 'Snow', '50': 'Mist'} aqius_mapping = {'Good': range(0, 50), 'Moderate': range(51, 100), 'Unhealthy", "part to the human friendly text version. 
data += f\"{weather_mapping[jsonResponse['data']['current']['weather']['ic'][:-1]]}\\n\" # Print out", "don't care # so we're just mapping the numerical part to the human", "help='Current Members', ignore_extra=True, hidden=False, enabled=True) async def members(self, ctx, showall=None): guild = discord.utils.get(self.bot.guilds,", "hidden=False, enabled=True) async def member(self, ctx, mention): guild = discord.utils.get(self.bot.guilds, name=GUILD) for member", "bytes\\n\\n\" f\"**Text Channels:**\\n - {text_channels}\\n\\n\" f\"**Voice Channels:**\\n - {voice_channels}\\n\\n\" f\"**Server Members: {guild.member_count}**\\n -", "@commands.command(name='nickname', description='Change nickname', help='Change nickname', ignore_extra=True, hidden=False, enabled=True) async def nickname(self, ctx, *,", "bot): self.bot = bot @commands.Cog.listener() async def on_ready(self): print('- Utility Cog loaded') @commands.command(name='ban',", "{member.name}#{member.discriminator}\\n\" f\"Discord ID: {member.id}\\n\" f\"Joined: {member.joined_at}\\n\" f\"Status: {member.status}\\n\" # f\"Is on Mobile: {member.is_on_mobile}\\n\"", "hidden=True, enabled=True) async def whoami(self, ctx): await ctx.channel.send(f\"User Name: {ctx.author.name}\\nUser ID: {ctx.author.id}\") def", "is # actually 11.25 - 33.75 degrees vs the ints required by range().", "member: discord.member, *, reason=None): if member.hasPermission('BAN_MEMBERS'): await member.ban(reason=reason) await ctx.send(f\"Banned {member.mention}\") @commands.command(name='clear', description='Clear", "f\"Top Role: {member.top_role}\\n\" f\"Roles: {str(', '.join([role.name for role in member.roles]).replace('@', ''))}\\n\") @commands.command(name='members', description='Current", "{text_channels}\\n\\n\" f\"**Voice Channels:**\\n - {voice_channels}\\n\\n\" f\"**Server Members: {guild.member_count}**\\n - {members}\") @commands.command(name='unban', description='Unban member", "guild = discord.utils.get(self.bot.guilds, name=GUILD) text_channels = '\\n - '.join([channel.name for channel in guild.text_channels])", "else: members += f\"- {member.display_name}\\n\" await ctx.channel.send(f\"**Server Members: {guild.member_count}**\\n{members.replace('@', '')}\") @commands.command(name='nickname', description='Change nickname',", "f\"Air Quality Index: {jsonResponse['data']['current']['pollution']['aqius']} - \" for key, val in aqius_mapping.items(): if int(jsonResponse['data']['current']['pollution']['aqius'])", "CONDITIONS OF ANY KIND, either express or implied. 
            data += f"Wind: {int(jsonResponse['data']['current']['weather']['ws'] * 2.236936)} m/h from "
            for key, val in wind_mapping.items():
                if int(jsonResponse['data']['current']['weather']['wd']) in val:
                    data += f"{jsonResponse['data']['current']['weather']['wd']}° ({key})\n"
            await ctx.channel.send(data)
        else:
            await ctx.channel.send(f"No data found. Make sure to use underscores instead of spaces in city or state, i.e. Santa_Fe New_Mexico")
class Utility(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_ready(self):
        print('- Utility Cog loaded')

    @commands.command(name='ban', description='Ban member from server', help='Ban member from server', ignore_extra=True, hidden=True, enabled=False)
    async def ban(self, ctx, member: discord.Member, *, reason=None):
        # Check the invoker's guild permissions. The original member.hasPermission('BAN_MEMBERS')
        # is a discord.js idiom that does not exist in discord.py.
        if ctx.author.guild_permissions.ban_members:
            await member.ban(reason=reason)
            await ctx.send(f"Banned {member.mention}")

    @commands.command(name='clear', description='Clear x messages, defaults to 3', help='Clear x messages, defaults to 3', ignore_extra=True, hidden=False, enabled=True)
    @commands.has_permissions(manage_messages=True)
    async def clear(self, ctx, amount=3):
        await ctx.channel.purge(limit=amount)

    @commands.command(name='ip_abuse', description='Get abuse score for IP', help='Get abuse score for IP', ignore_extra=True, hidden=False, enabled=True)
    async def ip_abuse(self, ctx, ipAddress):
        ABUSEIPDB_KEY = os.getenv('ABUSEIPDB_KEY')
        url = 'https://api.abuseipdb.com/api/v2/check'
        querystring = {
            'ipAddress': ipAddress,
            'maxAgeInDays': '90'
        }
        headers = {
            'Accept': 'application/json',
            'Key': ABUSEIPDB_KEY
        }
        response = requests.request(method='GET', url=url, headers=headers, params=querystring)
        ip_info = json.loads(response.text)
        data = '**Data provided by abuseipdb.com**\n\n'
        for k, v in ip_info['data'].items():
            data += f"{k}: {v}\n"
        await ctx.channel.send(data)
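    # The loop above dumps every field AbuseIPDB returns. For reference (based
    # on the public APIv2 documentation, not on anything in this file), the
    # 'data' object typically includes keys such as ipAddress,
    # abuseConfidenceScore, totalReports and lastReportedAt, so the reply reads
    # like "abuseConfidenceScore: 97".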
    @commands.command(name='kick', description='Kick member off server', help='Kick member off server', ignore_extra=True, hidden=True, enabled=False)
    async def kick(self, ctx, member: discord.Member, *, reason=None):
        # Same fix as ban(): check the invoker's guild permissions.
        if ctx.author.guild_permissions.kick_members:
            await member.kick(reason=reason)
            await ctx.send(f"Kicked {member.mention}")

    @commands.command(name='member', description='Member information', help='Member information', ignore_extra=True, hidden=False, enabled=True)
    async def member(self, ctx, mention):
        guild = discord.utils.get(self.bot.guilds, name=GUILD)
        for member in guild.members:
            if str(member.id) == str(mention.lstrip('<@!').rstrip('>')):
                await ctx.channel.send(f"Nickname: {member.nick}\n"
                                       f"Discord Name: {member.name}\n"
                                       f"Discord ID: {member.id}\n"
                                       f"Joined: {member.joined_at}\n"
                                       f"Status: {member.status}\n"
                                       # f"Is on Mobile: {member.is_on_mobile}\n"
                                       # f"Activity: {str(member.activity.type).lstrip('ActivityType.')} {member.activity.name}\n"
                                       f"Guild: {member.guild}\n"
                                       f"Guild Permissions: {member.guild_permissions}\n"
                                       f"Top Role: {member.top_role}\n"
                                       f"Roles: {str(', '.join([role.name for role in member.roles]).replace('@', ''))}\n")

    @commands.command(name='members', description='Current Members', help='Current Members', ignore_extra=True, hidden=False, enabled=True)
    async def members(self, ctx, showall=None):
        guild = discord.utils.get(self.bot.guilds, name=GUILD)
        members = ''
        for member in guild.members:
            if showall == 'showall':
                members += f"- {member.display_name} : "
                members += f"{member.name} ("
                members += ', '.join([role.name for role in member.roles]) + ")\n"
            else:
                members += f"- {member.display_name}\n"
        await ctx.channel.send(f"**Server Members: {guild.member_count}**\n{members.replace('@', '')}")

    @commands.command(name='nickname', description='Change nickname', help='Change nickname', ignore_extra=True, hidden=False, enabled=True)
    async def nickname(self, ctx, *, nickname):
        await ctx.author.edit(nick=nickname)
        # if role:  # If get could find the role
        #     await client.add_role(ctx.message.author, role)

    @commands.command(name='ping', description='Ping latency', help='Ping latency', ignore_extra=True, hidden=False, enabled=True)
    async def ping(self, ctx):
        await ctx.channel.send(f"Pong... {round(self.bot.latency * 1000)} ms")

    @commands.command(name='server', description='Server information', help='Server information', ignore_extra=True, hidden=False, enabled=True)
    @commands.has_role('admins')
    async def server(self, ctx):
        guild = discord.utils.get(self.bot.guilds, name=GUILD)
        text_channels = '\n - '.join([channel.name for channel in guild.text_channels])
        voice_channels = '\n - '.join([channel.name for channel in guild.voice_channels])
        members = '\n - '.join([member.name for member in guild.members])
        await ctx.channel.send(f"Server Name: {guild.name} (ID: {guild.id})\n"
                               f"Server Owner: {guild.owner} (ID: {guild.owner_id})\n"
                               f"Server Description: {guild.description}\n"
                               f"Region: {guild.region}\n"
                               f"File Size Limit: {guild.filesize_limit} bytes\n\n"
                               f"**Text Channels:**\n - {text_channels}\n\n"
                               f"**Voice Channels:**\n - {voice_channels}\n\n"
                               f"**Server Members: {guild.member_count}**\n - {members}")

    @commands.command(name='unban', description='Unban member from server', help='Unban member from server', ignore_extra=True, hidden=True, enabled=False)
    async def unban(self, ctx, *, member):
        banned_users = await ctx.guild.bans()
        member_name, member_discriminator = member.split('#')
        for ban_entry in banned_users:
            user = ban_entry.user
            if (user.name, user.discriminator) == (member_name, member_discriminator):
                await ctx.guild.unban(user)
                await ctx.send(f"Unbanned {user.mention}")
                return
    @commands.command(name='weather', description='Weather as !weather CITY STATE', help='Weather as !weather Santa_Fe New_Mexico', ignore_extra=True, hidden=True, enabled=True)
    async def weather(self, ctx, city, state):
        # This is intended as a 'good enough' tool. There are some accuracy issues
        # such as conversion of C to F temps and wind_mapping where NNE is
        # actually 11.25 - 33.75 degrees vs the ints required by range().
        url = f"https://api.airvisual.com/v2/city?city={city.replace('_', '%20')}&state={state.replace('_', '%20')}&country=USA&key={AIRVISUAL_KEY}"
        response = requests.request(method='GET', url=url)
        jsonResponse = json.loads(response.text)
        if jsonResponse['status'] == 'success':
            # 'ic' icon codes follow the OpenWeatherMap-style set used by AirVisual.
            weather_mapping = {'01': 'Clear Sky',
                               '02': 'Few Clouds',
                               '03': 'Scattered Clouds',
                               '04': 'Broken Clouds',
                               '09': 'Shower Rain',
                               '10': 'Rain',
                               '11': 'Thunderstorm',
                               '13': 'Snow',
                               '50': 'Mist'}
            aqius_mapping = {'Good': range(0, 50),
                             'Moderate': range(51, 100),
                             'Unhealthy for Sensitive Groups': range(101, 150),
                             'Unhealthy': range(151, 200),
                             'Very Unhealthy': range(201, 300),
                             'Hazardous': range(301, 500)}
            wind_mapping = {'North': range(349, 360),
                            'NNE': range(12, 34),
                            'NE': range(35, 56),
                            'ENE': range(57, 79),
                            'E': range(80, 101),
                            'ESE': range(102, 124),
                            'SE': range(125, 146),
                            'SSE': range(147, 169),
                            'S': range(170, 191),
                            'SSW': range(192, 214),
                            'SW': range(215, 236),
                            'WSW': range(237, 259),
                            'W': range(260, 281),
                            'WNW': range(282, 304),
                            'NW': range(305, 326),
                            'NNW': range(327, 348)}
            data = f"{jsonResponse['data']['city']}, {jsonResponse['data']['state']}, {jsonResponse['data']['country']}\n"
            data += f"{jsonResponse['data']['location']['coordinates']}\n"
            # ic comes through like 01d or 01n to differentiate day or night, we don't care
            # so we're just mapping the numerical part to the human friendly text version.
            data += f"{weather_mapping[jsonResponse['data']['current']['weather']['ic'][:-1]]}\n"
            # Print out the AQIUS then do a map to ranges and print out a human friendly
            # equivalent.
            data += f"Air Quality Index: {jsonResponse['data']['current']['pollution']['aqius']} - "
            for key, val in aqius_mapping.items():
                if int(jsonResponse['data']['current']['pollution']['aqius']) in val:
                    data += f"{key}\n"
            data += f"Temperature: {int(((jsonResponse['data']['current']['weather']['tp']) * 9) / 5) + 32}°F\n"
            data += f"Pressure: {jsonResponse['data']['current']['weather']['pr']} hPa\n"
            data += f"Humidity: {jsonResponse['data']['current']['weather']['hu']}%\n"
            data += f"Wind: {int(jsonResponse['data']['current']['weather']['ws'] * 2.236936)} m/h from "
            for key, val in wind_mapping.items():
                if int(jsonResponse['data']['current']['weather']['wd']) in val:
                    data += f"{jsonResponse['data']['current']['weather']['wd']}° ({key})\n"
            await ctx.channel.send(data)
        else:
            await ctx.channel.send("No data found. Make sure to use underscores instead of spaces in city or state, i.e. Sante_Fe New_Mexico")

    @commands.command(name='whoami', description='Info about you', help='Info about you', ignore_extra=True, hidden=True, enabled=True)
    async def whoami(self, ctx):
        await ctx.channel.send(f"User: {ctx.author} (ID: {ctx.author.id})")
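# A sketch of how this cog would be registered (an assumption -- the loader was
# not part of the recovered text): discord.py 1.x extensions expose a setup()
# entry point so the bot can call load_extension() on this module.
def setup(bot):
    bot.add_cog(Utility(bot))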
import collections
import io
import json
import math
import zipfile
import logging
from urllib.error import URLError
from urllib.request import urlopen
import pandas as pd
from matplotlib import pyplot as plt


# Getting data
def set_source(filename):
    """
    Sets source global variable to the path of .zip file.
    :param filename: path to the downloaded .zip file
    :return: None

    You can provide relative path to file
    >>> set_source('facebook-YourName.zip')
    Absolute path (works only on Windows)
    >>> set_source('C:/Users/Admin/Downloads/facebook-YourName.zip')
    """
    filename = f'file:///{filename}' if filename[1] == ':' \
        else (f'file:./{filename}' if filename.endswith('.zip') else f'file:./{filename}.zip')
    try:
        global source
        source = zipfile.ZipFile(io.BytesIO(urlopen(filename).read()))
    except URLError:
        logging.error('File not found, try again.')


def get_data(conversation=None, chars=False, user=False):
    """
    Reads data from messages.json or messages_chars.json and finds key based
    on the beginning of the string.
    :param conversation: beginning of the conversation id or None for overall statistics (default None)
    :param chars: True for counting chars in messages_chars.json,
                  False for counting messages in messages.json (default False)
    :param user: True for user name instead of conversation id, False otherwise (default False)
    :return: dictionary containing the data and if applicable a key pointing to
             a specific conversation, otherwise None
    """
    try:
        data = json.loads(open('messages_chars.json' if chars else 'messages.json', 'r', encoding='utf-8').read())
        if user:
            data = pd.DataFrame(data).fillna(0).astype('int')
            for key in data.index:
                if key.lower().startswith(conversation.lower()):
                    return data, key
            else:
                logging.error('Conversation not found.')
                return None, None
        if conversation is not None:
            for key in data.keys():
                if key.lower().startswith(conversation.lower()):
                    return data, key
            else:
                logging.error('Conversation not found.')
                return None, None
        return data, None
    except FileNotFoundError:
        logging.error('Characters not counted.' if chars else 'Messages not counted.')
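# A hypothetical session with the two functions above (the export filename and
# conversation name are placeholders): count() below must have been run first
# so that messages.json exists for get_data() to read.
#
#     set_source('facebook-YourName.zip')
#     count()                       # writes messages.json: {conversation: {sender: n, ..., 'total': n}}
#     data, key = get_data('john')  # case-insensitive prefix match on the conversation id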
# Counting messages and characters
def count_messages():
    """
    Counts messages and saves output to messages.json.
    :return: None
    """
    namelist = source.namelist()
    total, senders = {}, {x.split('/')[2] for x in namelist
                          if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}
    for sender in senders:
        messages, i = collections.Counter(), 0
        while True:
            try:
                i += 1
                messages += collections.Counter(pd.DataFrame(json.loads(
                    source.open('messages/inbox/' + sender + '/message_' + str(i) + '.json').read())[
                    'messages']).iloc[:, 0])
            except KeyError:
                break
        total[sender] = {k.encode('iso-8859-1').decode('utf-8'): v for k, v in messages.items()}
        total[sender]['total'] = sum(messages.values())
    with open('messages.json', 'w', encoding='utf-8') as output:
        json.dump(total, output, ensure_ascii=False)


def count_characters():
    """
    Counts characters from messages and saves output to messages_chars.json.
    :return: None
    """
    namelist = source.namelist()
    total, senders = {}, {x.split('/')[2] for x in namelist
                          if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}
    for sender in senders:
        counted_all, i = collections.Counter(), 0
        while True:
            try:
                i += 1
                frame = pd.DataFrame(json.loads(
                    source.open('messages/inbox/' + sender + '/message_' + str(i) + '.json').read())['messages'])
                frame['counted'] = frame.apply(
                    lambda row: collections.Counter(str(row['content']).encode('iso-8859-1').decode('utf-8')), axis=1)
                counted_all += sum(frame['counted'], collections.Counter())
            except KeyError:
                break
        total[sender] = dict(counted_all)
    with open('messages_chars.json', 'w', encoding='utf-8') as output:
        json.dump(total, output, ensure_ascii=False)


def count(chars=False):
    """
    Counts messages or characters from messages and saves output to the file.
    :param chars: True for counting characters, False for counting messages (default False)
    :return: None
    """
    if chars:
        count_characters()
    else:
        count_messages()


# Statistics
def statistics(data_source, conversation=None, chars=False):
    """
    Prints statistics of given data source.
    :param data_source: dictionary containing prepared data generated by the get_data() function
    :param conversation: conversation id or None for overall statistics (default None)
    :param chars: True for character statistics instead of messages, False otherwise (default False)
    :return: None
    """
    if conversation is None:
        if chars:
            characters_statistics(data_source)
        else:
            messages_statistics(data_source)
    else:
        if chars:
            characters_conversation_statistics(data_source, conversation)
        else:
            conversation_statistics(data_source, conversation)


def messages_statistics(data_source):
    """
    Prints messages overall statistics of given data source.
    :param data_source: dictionary containing prepared data generated by the get_data() function
    :return: None
    """
    data_source = pd.DataFrame(data_source).fillna(0).astype('int')
    pd.set_option('display.max_rows', None)
    total_values = data_source.loc['total'].sort_values(ascending=False)
    print(total_values)
    print(total_values.describe())
    total_values = total_values.sort_values()
    plt.rcdefaults()
    plt.barh(total_values.index.astype(str).str[:10][-20:], total_values.iloc[-20:])
    plt.show()


def conversation_statistics(data_source, conversation):
    """
    Prints messages statistics for specific conversation of given data source.
    :param data_source: dictionary containing prepared data generated by the get_data() function
    :param conversation: conversation id, or key from get_data() function
    :return: None
    """
    data_source = pd.DataFrame(data_source)
    data_source = data_source.loc[:, conversation]
    data_source = data_source[data_source > 0].sort_values(ascending=False).astype('int')
    pd.set_option('display.max_rows', None)
    print(data_source)


def characters_statistics(data_source):
    """
    Prints characters statistics of given data source.
    :param data_source: dictionary containing prepared data generated by the get_data() function
    :return: None
    """
    data_source = pd.DataFrame(data_source)
    data_source['total'] = data_source.sum(axis=1)
    data_source = data_source.iloc[:, -1]
    data_source = data_source.sort_values(ascending=False).astype('int')
    pd.set_option('display.max_rows', None)
    print(data_source)
    print(f'Total characters: {data_source.sum()}')


# TODO characters conversation statistics
def characters_conversation_statistics(data_source, conversation):
    """
    Prints characters statistics for specific conversation of given data source.
    :param data_source: dictionary containing prepared data generated by the get_data() function
    :param conversation: conversation id, or key from get_data() function
    :return: None
    """
    pass


# User statistics
def user_statistics(data_source, user_name):
    """
    Prints detailed statistics for specific person of given data source.
    :param data_source: dictionary containing prepared data generated by the get_data() function
    :param user_name: person name, or key from get_data() function
    :return: None
    """
    data_source = data_source.loc[user_name]
    data_source = data_source[data_source > 0].sort_values(ascending=False)
    data_source.index = data_source.index.map(lambda x: x.split('_')[0][:30])
    pd.set_option('display.max_rows', None)
    print(user_name, 'statistics:')
    print(data_source)


# Intervals
def interval_count(inbox_name, function, delta=0.0):
    """
    Counts number of messages based on given timeframe function
    :param inbox_name: directory name that contains requested messages (usually conversation id)
    :param function: pandas function that returns requested time part
    :param delta: number of hours to time shift by and count messages differently (default 0.0)
    :return: dictionary of number of messages grouped by timeframe
    """
    messages, i = collections.Counter(), 0
    while True:
        try:
            i += 1
            # iterates over all .json files in requested directory
            messages += collections.Counter(function(pd.to_datetime(pd.DataFrame(json.loads(
                source.open('messages/inbox/' + inbox_name + '/message_' + str(i) + '.json').read())[
                'messages']).iloc[:, 1], unit='ms').dt.tz_localize('UTC').dt.tz_convert(
                'Europe/Warsaw').add(pd.Timedelta(hours=-delta))))
        except KeyError:
            break
    return messages


def interval_plot(messages):
    """
    Shows chart based on previously defined timeframe
    :param messages: dictionary of number of messages grouped by timeframe
    :return: None
    """
    messages = pd.Series(messages).sort_index()
    print(messages.describe())
    plt.bar(messages.index, messages)
    plt.savefig('messages.pdf')
    plt.show()


# Hours
def hours(difference, conversation=None):
    """
    Shows chart of average number of messages sent by hour throughout the day.
    :param difference: number of hours to time shift by and show statistics differently
    :param conversation: conversation id or None for statistics from all conversations (default None)
    :return: None
    """
    if conversation is None:
        hours_chats(difference)
    else:
        data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
        for key in data.keys():
            if key.lower().startswith(conversation.lower()):
                hours_conversation(key, difference)
                break
        else:
            print('Conversation not found.')


def hours_conversation(conversation, delta=0.0):
    """
    Shows chart of average number of messages sent in specific conversation
    by hour throughout the day.
    :param conversation: conversation id, or key from get_data() function
    :param delta: number of hours to time shift by and show statistics differently (default 0.0)
    :return: None
    """
    hours_plot(interval_count(conversation, lambda x: x.dt.hour, delta), delta)


def hours_chats(delta=0.0):
    """
    Shows chart of average number of messages sent across all conversations
    by hour throughout the day.
    :param delta: number of hours to time shift by and show statistics differently (default 0.0)
    :return: None
    """
    messages = collections.Counter()
    for sender in {x.split('/')[2] for x in source.namelist()
                   if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}:
        messages += interval_count(sender, lambda x: x.dt.hour, delta)
    hours_plot(messages, delta)


def hours_plot(messages, delta):
    """
    Shows chart of average number of messages grouped by hour throughout the day.
    :param messages: dictionary of number of messages grouped by timeframe
    :param delta: number of hours to time shift by and show statistics differently
    :return: None
    """
    messages = pd.DataFrame(messages, index=[0])
    print(messages.iloc[0].describe())
    plt.bar(messages.columns, messages.iloc[0])
    plt.xticks(list(range(24)),
               [f'{x % 24}:{int(abs((delta - int(delta)) * 60)):02}'
                for x in range(-(-math.floor(delta) % 24),
                               math.floor(delta) % 24 if math.floor(delta) % 24 > 0 else 24)],
               rotation=90)
    plt.xlim(-1, 24)
    plt.savefig('messages.pdf')
    plt.show()
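# A worked example of the xticks arithmetic above (illustrative only): with
# delta=1.5 the minute part is int(abs((1.5 - 1) * 60)) == 30 and the x range
# becomes range(-23, 1), so the 24 bars are labelled '1:30', '2:30', ...,
# '23:30', '0:30' -- the whole day axis is shifted by the requested offset.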
Count messages?[y/n] ').lower() == 'y': count_messages() if user_input[0]", "and x != 'messages/inbox/')} for sender in senders: counted_all, i = collections.Counter(), 0", "source = zipfile.ZipFile(io.BytesIO(urlopen(filename).read())) except URLError: logging.error('File not found, try again.') def get_data(conversation=None, chars=False,", "to time shift by and show statistics differently (default 0.0) :return: None \"\"\"", "is None: yearly_chats() else: data = json.loads(open('messages.json', 'r', encoding='utf-8').read()) for key in data.keys():", "'-h': daily_chats(float(user_input[2])) else: daily_chats() if user_input[0] == 'monthly': if len(user_input) > 1: try:", "== '-c': try: data = json.loads(open('messages.json', 'r', encoding='utf-8').read()) for key in data.keys(): if", "key in data.keys(): if key.lower().startswith(conversation.lower()): hours_conversation(key, difference) break else: print('Conversation not found.') def", "beginning of the conversation id or None for overall statistics (default None) :param", "plt.show() # Daily def daily(difference, conversation=None): \"\"\" Shows chart of number of messages", "counted. Count messages?[y/n] ').lower() == 'y': count_messages() else: monthly_chats() if user_input[0] == 'yearly':", "\"\"\" Shows chart of number of messages per year from the beginning of", "if key.startswith(user_input[1]): if len(user_input) < 3: daily_conversation(key) else: daily_conversation(key, float(user_input[2])) break else: print('Conversation", "sender + '/message_' + str(i) + '.json').read())['messages']) frame['counted'] = frame.apply( lambda row: collections.Counter(str(row['content']).encode('iso-8859-1').decode('utf-8')),", "displays statistics for counted messages') print(' [detailed statistics for specific conversation, character statistics]')", "from messages.json or messages_chars.json and finds key based on the beginning of the", "output: json.dump(total, output, ensure_ascii=False) def count(chars=False): \"\"\" Counts messages or characters from messages", "pd.set_option('display.max_rows', None) print(data_source) print(f'Total characters: {data_source.sum()}') # TODO characters conversation statistics def characters_conversation_statistics(data_source,", "difference: number of hours to time shift by and show statistics differently :param", "generated by the get_data() function :param user_name: person name, or key from get_data()", "directory name that contains requested messages (usually conversation id) :param function: pandas function", "user, day difference]') print(' daily [name, -h] - daily messages') print(' [specific user,", "[specific user]') # print(' monthly [name, -d] - monthly messages (available soon)') #", "conversation=None): \"\"\" Shows chart of number of messages per day. 
:param difference: number", "collections.Counter(), 0 while True: try: i += 1 frame = pd.DataFrame(json.loads( source.open('messages/inbox/' +", "Shows chart of average number of messages send across all conversations by hour", "in messages_chars.json, False for counting messages in messages.json (default False) :param user: True", "else: print('Please specify user name.') if user_input[0] == 'daily': if len(user_input) > 1", "ensure_ascii=False) def count_characters(): \"\"\" Counts characters from messages and saves output to messages_chars.json.", "daily_chats(difference) else: data = json.loads(open('messages.json', 'r', encoding='utf-8').read()) for key in data.keys(): if key.lower().startswith(conversation.lower()):", "dictionary of number of messages grouped by timeframe :param delta: number of hours", "and x != 'messages/inbox/')}: messages += interval_count(sender, lambda x: x.dt.hour, delta) hours_plot(messages, delta)", "person name, or key from get_data() function :return: None \"\"\" data_source = data_source.loc[user_name]", "- monthly messages (available soon)') # print(' [specific user, day difference]') print(' daily", "except URLError: print('File not found, try again.') while True: user_input = input('>').split(' ')", "if key.lower().startswith(conversation.lower()): daily_conversation(key, difference) break else: print('Conversation not found.') def daily_conversation(conversation, delta=0.0): \"\"\"", "number of messages per year from the beginning of the conversation. :param conversation:", "or None for overall statistics (default None) :param chars: True for character statistics", "by and show statistics differently (default 0.0) :return: None \"\"\" messages = collections.Counter()", ":return: None \"\"\" if chars: count_characters() else: count_messages() # Statistics def statistics(data_source, conversation=None,", "conversation id or None for statistics from all conversations (default None) :return: None", "print(' [specific user]') # print(' monthly [name, -d] - monthly messages (available soon)')", "key in data.index: if key.lower().startswith(conversation.lower()): return data, key else: logging.error('Conversation not found.') return", "data_source.loc[user_name] data_source = data_source[data_source > 0].sort_values(ascending=False) data_source.index = data_source.index.map(lambda x: x.split('_')[0][:30]) pd.set_option('display.max_rows', None)", "FileNotFoundError: if input('Characters not counted. Count characters?[y/n] ').lower() == 'y': count_characters() elif len(user_input)", ":return: None You can provide relative path to file >>> set_source('facebook-YourName.zip') Absolute path", "pd from matplotlib import pyplot as plt # Getting data def set_source(filename): \"\"\"", "int(delta)) * 60)):02}' for x in range(-(-math.floor(delta) % 24), math.floor(delta) % 24 if", "input('Messages not counted. Count messages?[y/n] ').lower() == 'y': count_messages() else: yearly_chats() if user_input[0]", "messages?[y/n] ').lower() == 'y': count_messages() else: yearly_chats() if user_input[0] == 'hours': if len(user_input)", "the path of .zip file. :param filename: path to the downloaded .zip file", "= dict(counted_all) with open('messages_chars.json', 'w', encoding='utf-8') as output: json.dump(total, output, ensure_ascii=False) def count(chars=False):", "print(data_source) def characters_statistics(data_source): \"\"\" Prints characters statistics of given data source. 
# Counting messages and characters
def count_messages():
    """
    Counts messages and saves the output to messages.json.

    :return: None
    """
    namelist = source.namelist()
    total, senders = {}, {x.split('/')[2] for x in namelist
                          if (x.endswith('/') and x.startswith('messages/inbox/')
                              and x != 'messages/inbox/')}
    for sender in senders:
        messages, i = collections.Counter(), 0
        while True:
            try:
                i += 1
                # column 0 of the frame is assumed to be the sender_name field
                messages += collections.Counter(pd.DataFrame(json.loads(
                    source.open('messages/inbox/' + sender + '/message_'
                                + str(i) + '.json').read())['messages']).iloc[:, 0])
            except KeyError:
                # ZipFile.open() raises KeyError once message_<i>.json is missing
                break
        total[sender] = {k.encode('iso-8859-1').decode('utf-8'): v
                         for k, v in messages.items()}
        total[sender]['total'] = sum(messages.values())
    with open('messages.json', 'w', encoding='utf-8') as output:
        json.dump(total, output, ensure_ascii=False)

def count_characters():
    """
    Counts characters from messages and saves the output to messages_chars.json.

    :return: None
    """
    namelist = source.namelist()
    total, senders = {}, {x.split('/')[2] for x in namelist
                          if (x.endswith('/') and x.startswith('messages/inbox/')
                              and x != 'messages/inbox/')}
    for sender in senders:
        counted_all, i = collections.Counter(), 0
        while True:
            try:
                i += 1
                frame = pd.DataFrame(json.loads(
                    source.open('messages/inbox/' + sender + '/message_'
                                + str(i) + '.json').read())['messages'])
                frame['counted'] = frame.apply(
                    lambda row: collections.Counter(
                        str(row['content']).encode('iso-8859-1').decode('utf-8')),
                    axis=1)
                counted_all += sum(frame['counted'], collections.Counter())
            except KeyError:
                break
        total[sender] = dict(counted_all)
    with open('messages_chars.json', 'w', encoding='utf-8') as output:
        json.dump(total, output, ensure_ascii=False)


def count(chars=False):
    """
    Counts messages or characters and saves the output to the corresponding file.

    :param chars: True for counting characters,
                  False for counting messages (default False)
    :return: None
    """
    if chars:
        count_characters()
    else:
        count_messages()

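# Shape of the generated files, as produced by the two counters above
# (conversation and sender names are illustrative):
#
#     messages.json:       {"johnsmith_a1b2c3": {"John Smith": 123,
#                                                "Jane Doe": 101,
#                                                "total": 224}}
#     messages_chars.json: {"johnsmith_a1b2c3": {"a": 1502, "b": 341, ...}}
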
# Statistics
def statistics(data_source, conversation=None, chars=False):
    """
    Prints statistics of the given data source.

    :param data_source: dictionary containing prepared data generated
                        by the get_data() function
    :param conversation: conversation id or None for overall statistics
                         (default None)
    :param chars: True for character statistics instead of messages,
                  False otherwise (default False)
    :return: None
    """
    if conversation is None:
        if chars:
            characters_statistics(data_source)
        else:
            messages_statistics(data_source)
    else:
        if chars:
            # per-conversation character statistics are not implemented yet
            raise NotImplementedError()
        else:
            print(conversation)
            conversation_statistics(data_source, conversation)

def messages_statistics(data_source):
    """
    Prints overall message statistics of the given data source.

    :param data_source: dictionary containing prepared data generated
                        by the get_data() function
    :return: None
    """
    data_source = pd.DataFrame(data_source).fillna(0).astype('int')
    pd.set_option('display.max_rows', None)
    total_values = data_source.loc['total'].sort_values(ascending=False)
    print(total_values)
    print(total_values.describe())
    total_values = total_values.sort_values()
    plt.rcdefaults()
    plt.barh(total_values.index.astype(str).str[:10][-20:], total_values.iloc[-20:])
    plt.show()


def conversation_statistics(data_source, conversation):
    """
    Prints message statistics for a specific conversation of the given data source.

    :param data_source: dictionary containing prepared data generated
                        by the get_data() function
    :param conversation: conversation id, or key from the get_data() function
    :return: None
    """
    data_source = pd.DataFrame(data_source)
    data_source = data_source.loc[:, conversation]
    data_source = data_source[data_source > 0].sort_values(ascending=False).astype('int')
    pd.set_option('display.max_rows', None)
    print(data_source)

def characters_statistics(data_source):
    """
    Prints character statistics of the given data source.

    :param data_source: dictionary containing prepared data generated
                        by the get_data() function
    :return: None
    """
    data_source = pd.DataFrame(data_source)
    data_source['total'] = data_source.sum(axis=1)
    data_source = data_source.iloc[:, -1]
    data_source = data_source.sort_values(ascending=False).astype('int')
    pd.set_option('display.max_rows', None)
    print(data_source)
    print(f'Total characters: {data_source.sum()}')


# TODO characters conversation statistics
def characters_conversation_statistics(data_source, conversation):
    """
    Prints character statistics for a specific conversation of the given data source.

    :param data_source: dictionary containing prepared data generated
                        by the get_data() function
    :param conversation: conversation id, or key from the get_data() function
    :return: None
    """
    pass


# User statistics
def user_statistics(data_source, user_name):
    """
    Prints detailed statistics for a specific person of the given data source.

    :param data_source: dictionary containing prepared data generated
                        by the get_data() function
    :param user_name: person name, or key from the get_data() function
    :return: None
    """
    data_source = data_source.loc[user_name]
    data_source = data_source[data_source > 0].sort_values(ascending=False)
    data_source.index = data_source.index.map(lambda x: x.split('_')[0][:30])
    pd.set_option('display.max_rows', None)
    print(user_name, 'statistics:')
    print(data_source)

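# Sketch of the per-user statistics flow (the name is hypothetical):
# `user=True` makes get_data() return a per-sender table, which
# user_statistics() then slices by row.
#
#     data, key = get_data('John Smith', user=True)
#     if key is not None:
#         user_statistics(data, key)   # John's message count per conversation
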
# Intervals
def interval_count(inbox_name, function, delta=0.0):
    """
    Counts the number of messages based on the given timeframe function.

    :param inbox_name: directory name that contains the requested messages
                       (usually the conversation id)
    :param function: pandas function that returns the requested time part
    :param delta: number of hours to time shift by and count messages
                  differently (default 0.0)
    :return: dictionary of the number of messages grouped by timeframe
    """
    messages, i = collections.Counter(), 0
    while True:
        try:
            i += 1
            # iterates over all message_<i>.json files in the requested directory;
            # column 1 is assumed to be timestamp_ms, converted to the (hardcoded)
            # Europe/Warsaw timezone and then shifted back by `delta` hours
            messages += collections.Counter(function(pd.to_datetime(pd.DataFrame(json.loads(
                source.open('messages/inbox/' + inbox_name + '/message_'
                            + str(i) + '.json').read())['messages']).iloc[:, 1],
                unit='ms').dt.tz_localize('UTC').dt.tz_convert(
                'Europe/Warsaw').add(pd.Timedelta(hours=-delta))))
        except KeyError:
            break
    return messages


def interval_plot(messages):
    """
    Shows a chart based on the previously defined timeframe.

    :param messages: dictionary of the number of messages grouped by timeframe
    :return: None
    """
    messages = pd.Series(messages).sort_index()
    print(messages.describe())
    plt.bar(messages.index, messages)
    plt.savefig('messages.pdf')
    plt.show()

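# How the `delta` shift works (worked example, conversation id hypothetical):
# with delta=6.0 every timestamp is moved back six hours, so a message sent
# at 02:30 is counted towards the previous day -- useful when late-night
# activity should not start a "new day" at midnight.
#
#     interval_plot(interval_count('johnsmith_a1b2c3', lambda x: x.dt.date, 6.0))
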
# Hours
def hours(difference, conversation=None):
    """
    Shows a chart of the average number of messages sent by hour
    throughout the day.

    :param difference: number of hours to time shift by
                       and show statistics differently
    :param conversation: conversation id or None for statistics
                         from all conversations (default None)
    :return: None
    """
    if conversation is None:
        hours_chats(difference)
    else:
        data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
        for key in data.keys():
            if key.lower().startswith(conversation.lower()):
                hours_conversation(key, difference)
                break
        else:
            print('Conversation not found.')


def hours_conversation(conversation, delta=0.0):
    """
    Shows a chart of the average number of messages sent in a specific
    conversation by hour throughout the day.

    :param conversation: conversation id, or key from the get_data() function
    :param delta: number of hours to time shift by and show statistics
                  differently (default 0.0)
    :return: None
    """
    hours_plot(interval_count(conversation, lambda x: x.dt.hour, delta), delta)

def hours_chats(delta=0.0):
    """
    Shows a chart of the average number of messages sent across all
    conversations by hour throughout the day.

    :param delta: number of hours to time shift by and show statistics
                  differently (default 0.0)
    :return: None
    """
    messages = collections.Counter()
    for sender in {x.split('/')[2] for x in source.namelist()
                   if (x.endswith('/') and x.startswith('messages/inbox/')
                       and x != 'messages/inbox/')}:
        messages += interval_count(sender, lambda x: x.dt.hour, delta)
    hours_plot(messages, delta)


def hours_plot(messages, delta):
    """
    Shows a chart of the average number of messages grouped by hour
    throughout the day.

    :param messages: dictionary of the number of messages grouped by timeframe
    :param delta: number of hours to time shift by and show statistics differently
    :return: None
    """
    messages = pd.DataFrame(messages, index=[0])
    print(messages.iloc[0].describe())
    plt.bar(messages.columns, messages.iloc[0])
    # tick labels start at the shifted hour and carry the fractional part
    # of `delta` as minutes
    plt.xticks(list(range(24)),
               [f'{x % 24}:{int(abs((delta - int(delta)) * 60)):02}'
                for x in range(-(-math.floor(delta) % 24),
                               math.floor(delta) % 24
                               if math.floor(delta) % 24 != 0 else 24)],
               rotation=90)
    plt.xlim(-1, 24)
    plt.savefig('messages.pdf')
    plt.show()

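# Tick label arithmetic (worked example): for delta=2.5 the fractional part
# becomes the minutes, so the leftmost tick reads '2:30' and the bar above
# it covers messages sent between 2:30 and 3:30 local time.
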
# Daily
def daily(difference, conversation=None):
    """
    Shows a chart of the number of messages per day.

    :param difference: number of hours to time shift by
                       and show statistics differently
    :param conversation: conversation id or None for statistics
                         from all conversations (default None)
    :return: None
    """
    if conversation is None:
        daily_chats(difference)
    else:
        data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
        for key in data.keys():
            if key.lower().startswith(conversation.lower()):
                daily_conversation(key, difference)
                break
        else:
            print('Conversation not found.')


def daily_conversation(conversation, delta=0.0):
    """
    Shows a chart of the number of messages per day from the beginning
    of the conversation.

    :param conversation: conversation id, or key from the get_data() function
    :param delta: number of hours to time shift by and show statistics
                  differently (default 0.0)
    :return: None
    """
    interval_plot(interval_count(conversation, lambda x: x.dt.date, delta))

def daily_chats(delta=0.0):
    """
    Shows a chart of the number of messages per day across all conversations.

    :param delta: number of hours to time shift by and show statistics
                  differently (default 0.0)
    :return: None
    """
    messages = collections.Counter()
    for sender in {x.split('/')[2] for x in source.namelist()
                   if (x.endswith('/') and x.startswith('messages/inbox/')
                       and x != 'messages/inbox/')}:
        messages += interval_count(sender, lambda x: x.dt.date, delta)
    interval_plot(messages)

# Monthly (not working)
def monthly_conversation(conversation):  # TODO: monthly charts not working yet
    """
    Shows a chart of the number of messages per month.

    :param conversation: conversation id or None for statistics
                         from all conversations (default None)
    :return: None
    """
    interval_plot(interval_count(conversation,
                                 lambda x: x.dt.to_period("M").astype('datetime64[ns]')))


def monthly_chats():
    """
    Shows a chart of the number of messages per month across all conversations.

    :return: None
    """
    messages = collections.Counter()
    for sender in {x.split('/')[2] for x in source.namelist()
                   if (x.endswith('/') and x.startswith('messages/inbox/')
                       and x != 'messages/inbox/')}:
        messages += interval_count(sender,
                                   lambda x: x.dt.to_period("M").astype('datetime64[ns]'))
    interval_plot(messages)

# Yearly
def yearly(conversation=None):
    """
    Shows a chart of the number of messages per year.

    :param conversation: conversation id or None for statistics
                         from all conversations (default None)
    :return: None
    """
    if conversation is None:
        yearly_chats()
    else:
        data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
        for key in data.keys():
            if key.lower().startswith(conversation.lower()):
                yearly_conversation(key)
                break
        else:
            print('Conversation not found.')


def yearly_conversation(conversation):
    """
    Shows a chart of the number of messages per year from the beginning
    of the conversation.

    :param conversation: conversation id, or key from the get_data() function
    :return: None
    """
    # mirrors daily_conversation(), grouping by year instead of date
    interval_plot(interval_count(conversation, lambda x: x.dt.year))


def yearly_chats():
    """
    Shows a chart of the number of messages per year across all conversations.

    :return: None
    """
    messages = collections.Counter()
    for sender in {x.split('/')[2] for x in source.namelist()
                   if (x.endswith('/') and x.startswith('messages/inbox/')
                       and x != 'messages/inbox/')}:
        messages += interval_count(sender, lambda x: x.dt.year)
    messages = pd.DataFrame(messages, index=[0])
    print(messages.iloc[0].describe())
    plt.bar(messages.columns, messages.iloc[0])
    plt.savefig('messages.pdf')
    plt.show()

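# The chart helpers share one calling pattern (prefix hypothetical):
#
#     daily(0.0, 'john')   # per-day counts for the first chat matching 'john'
#     hours(6.0)           # hour histogram over all chats, day starting at 6:00
#     yearly()             # per-year totals over all chats
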
Count messages?[y/n] ').lower() == 'y': count_messages() if user_input[0] == 'user': if len(user_input)", "data = json.loads(open('messages_chars.json' if chars else 'messages.json', 'r', encoding='utf-8').read()) if user: data =", "None, None else: return data, None except FileNotFoundError: logging.error('Characters not counted.' if chars", "per year from the beginning of the conversation. :param conversation: conversation id, or", "> 0].sort_values(ascending=False).astype('int') pd.set_option('display.max_rows', None) print(data_source) def characters_statistics(data_source): \"\"\" Prints characters statistics of given", "per month across all conversation. :return: None \"\"\" messages = collections.Counter() for sender", "try: data = json.loads(open('messages_chars.json' if chars else 'messages.json', 'r', encoding='utf-8').read()) if user: data", "instead of conversation id, False otherwise (default False) :return: dictionary containing the data", "'r', encoding='utf-8').read()) data = pd.DataFrame(data).fillna(0).astype('int') for key in data.index: if key.startswith(' '.join(user_input[1:])): user_statistics(data,", "').lower() == 'y': count_messages() else: monthly_chats() if user_input[0] == 'yearly': if len(user_input) >", "+= interval_count(sender, lambda x: x.dt.year) messages = pd.DataFrame(messages, index=[0]) print(messages.iloc[0].describe()) plt.bar(messages.columns, messages.iloc[0]) plt.savefig('messages.pdf')", "'r', encoding='utf-8').read()) characters_statistics(data) except FileNotFoundError: if input('Characters not counted. Count characters?[y/n] ').lower() ==", "key.startswith(' '.join(user_input[1:])): user_statistics(data, key) break else: print('Conversation not found.') except FileNotFoundError: if input('Messages", ":return: None \"\"\" data_source = pd.DataFrame(data_source) data_source['total'] = data_source.sum(axis=1) data_source = data_source.iloc[:, -1]", "None) :return: None \"\"\" if conversation is None: hours_chats(difference) else: data = json.loads(open('messages.json',", "function :return: None \"\"\" data_source = data_source.loc[user_name] data_source = data_source[data_source > 0].sort_values(ascending=False) data_source.index", ":param delta: number of hours to time shift by and count messages differently", "0 while True: try: i += 1 # iterates over all .json files", "'y': count_messages() elif len(user_input) > 1 and user_input[1] == '-h': hours_chats(float(user_input[2])) else: hours_chats()", "messages') print(' [specific user, hours difference]') print(' help - displays this help prompt')", "function :param conversation: conversation id, or key from get_data() function :return: None \"\"\"", "key.startswith(user_input[1]): if len(user_input) < 3: daily_conversation(key) else: daily_conversation(key, float(user_input[2])) break else: print('Conversation not", "time shift by and show statistics differently :return: None \"\"\" messages = pd.DataFrame(messages,", "data generated by the get_data() function :return: None \"\"\" data_source = pd.DataFrame(data_source).fillna(0).astype('int') pd.set_option('display.max_rows',", "key in data.keys(): if key.startswith(user_input[1]): characters_conversation_statistics(data, key) break else: print('Conversation not found.') except", "i = collections.Counter(), 0 while True: try: i += 1 messages += collections.Counter(pd.DataFrame(json.loads(", "containing prepared data generated by the get_data() function :param conversation: conversation id, or", "data.index: if 
key.lower().startswith(conversation.lower()): return data, key else: logging.error('Conversation not found.') return None, None", "for specific conversation, character statistics]') print(' user [name] - detailed statistics for specific", "data_source['total'] = data_source.sum(axis=1) data_source = data_source.iloc[:, -1] data_source = data_source.sort_values(ascending=False).astype('int') pd.set_option('display.max_rows', None) print(data_source)", "chart of average number of messages send in specific conversation by hour throughout", "\"\"\" messages = pd.Series(messages).sort_index() print(messages.describe()) plt.bar(messages.index, messages) plt.savefig('messages.pdf') plt.show() # Hours def hours(difference,", "len(user_input) > 1 and user_input[1] == '-h': daily_chats(float(user_input[2])) else: daily_chats() if user_input[0] ==", "x.dt.to_period(\"M\").astype('datetime64[ns]')) interval_plot(messages) # Yearly def yearly(conversation=None): \"\"\" Shows chart of number of messages", "function :return: None \"\"\" data_source = pd.DataFrame(data_source) data_source['total'] = data_source.sum(axis=1) data_source = data_source.iloc[:,", "-d] - monthly messages (available soon)') # print(' [specific user, day difference]') print('", "statistics from all conversations (default None) :return: None \"\"\" if conversation is None:", "else 'messages.json', 'r', encoding='utf-8').read()) if user: data = pd.DataFrame(data).fillna(0).astype('int') for key in data.index:", "Intervals def interval_count(inbox_name, function, delta=0.0): \"\"\" Counts number of messages based on given", "get_data() function :return: None \"\"\" data_source = data_source.loc[user_name] data_source = data_source[data_source > 0].sort_values(ascending=False)", "FileNotFoundError: if input('Messages not counted. Count messages?[y/n] ').lower() == 'y': count_messages() else: monthly_chats()", "None You can provide relative path to file >>> set_source('facebook-YourName.zip') Absolute path (works", "that contains requested messages (usually conversation id) :param function: pandas function that returns", "'w', encoding='utf-8') as output: json.dump(total, output, ensure_ascii=False) def count(chars=False): \"\"\" Counts messages or", "1 frame = pd.DataFrame(json.loads( source.open('messages/inbox/' + sender + '/message_' + str(i) + '.json').read())['messages'])", "filename.endswith('.zip') else f'file:./{filename}.zip') try: source = zipfile.ZipFile(io.BytesIO(urlopen(filename).read())) break except URLError: print('File not found,", "messages?[y/n] ').lower() == 'y': count_messages() else: print('Please specify user name.') if user_input[0] ==", "messages += interval_count(sender, lambda x: x.dt.date, delta) interval_plot(messages) # Monthly (not working) def", "Shows chart of average number of messages send by hour throughout the day.", "number of messages per day. 
:param difference: number of hours to time shift", "messages += interval_count(sender, lambda x: x.dt.hour, delta) hours_plot(messages, delta) def hours_plot(messages, delta): \"\"\"", "count(chars=False): \"\"\" Counts messages or characters from messages and saves output to the", "True: user_input = input('>').split(' ') if user_input[0] == 'exit': break if user_input[0] ==", "as output: json.dump(total, output, ensure_ascii=False) def count_characters(): \"\"\" Counts characters from messages and", "+ '/message_' + str(i) + '.json').read())['messages']) frame['counted'] = frame.apply( lambda row: collections.Counter(str(row['content']).encode('iso-8859-1').decode('utf-8')), axis=1)", "None for statistics from all conversations (default None) :return: None \"\"\" interval_plot(interval_count(conversation, lambda", "daily_conversation(conversation, delta=0.0): \"\"\" Shows chart of number of messages per day from the", "delta=0.0): \"\"\" Counts number of messages based on given timeframe function :param inbox_name:", "chart of number of messages per day across all conversation. :param delta: number", "try: data = json.loads(open('messages.json', 'r', encoding='utf-8').read()) messages_statistics(data) except FileNotFoundError: if input('Messages not counted.", "else: yearly_chats() if user_input[0] == 'hours': if len(user_input) > 1 and not user_input[1]", "shift by and show statistics differently (default 0.0) :return: None \"\"\" messages =", "= collections.Counter(), 0 while True: try: i += 1 # iterates over all", "\"\"\" interval_plot(interval_count(conversation, lambda x: x.dt.to_period(\"M\").astype('datetime64[ns]'))) def monthly_chats(): \"\"\" Shows chart of number of", "input('>').split(' ') if user_input[0] == 'exit': break if user_input[0] == '' or user_input[0]", "= collections.Counter(), 0 while True: try: i += 1 messages += collections.Counter(pd.DataFrame(json.loads( source.open('messages/inbox/'", "key.startswith(user_input[1]): characters_conversation_statistics(data, key) break else: print('Conversation not found.') except FileNotFoundError: if input('Characters not", "counting messages (default False) :return: None \"\"\" if chars: count_characters() else: count_messages() #", "messages.json') print(' chars - counts all characters and saves to messages_chars.json') print(' stats", "data.keys(): if key.lower().startswith(conversation.lower()): hours_conversation(key, difference) break else: print('Conversation not found.') def hours_conversation(conversation, delta=0.0):", "messages in messages.json (default False) :param user: True for user name instead of", "namelist if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')} for sender in senders:", "\"\"\" Shows chart of number of messages per day from the beginning of", "as output: json.dump(total, output, ensure_ascii=False) def count(chars=False): \"\"\" Counts messages or characters from", "else f'file:./{filename}.zip') try: global source source = zipfile.ZipFile(io.BytesIO(urlopen(filename).read())) except URLError: logging.error('File not found,", "> 2 and user_input[2] == '-c': try: data = json.loads(open('messages_chars.json', 'r', encoding='utf-8').read()) for", "specific conversation of given data source. :param data_source: dictionary containing prepared data generated", "None) print(data_source) def characters_statistics(data_source): \"\"\" Prints characters statistics of given data source. 
:param", "False) :return: dictionary containing the data and if applicable a key pointing to", "try: i += 1 # iterates over all .json files in requested directory", "\"\"\" namelist = source.namelist() total, senders = {}, {x.split('/')[2] for x in namelist", "by and count messages differently (default 0.0) :return: dictionary of number of messages", "def daily_conversation(conversation, delta=0.0): \"\"\" Shows chart of number of messages per day from", "== 'y': count_messages() else: print('Please specify user name.') if user_input[0] == 'daily': if", "True for user name instead of conversation id, False otherwise (default False) :return:", "lambda x: x.dt.hour, delta) hours_plot(messages, delta) def hours_plot(messages, delta): \"\"\" Shows chart of", "\"\"\" Prints characters statistics of given data source. :param data_source: dictionary containing prepared", "for sender in senders: counted_all, i = collections.Counter(), 0 while True: try: i", "plt.bar(messages.index, messages) plt.savefig('messages.pdf') plt.show() # Hours def hours(difference, conversation=None): \"\"\" Shows chart of", "None) :return: None \"\"\" if conversation is None: daily_chats(difference) else: data = json.loads(open('messages.json',", "variable to the path of .zip file. :param filename: path to the downloaded", "break else: print('Conversation not found.') def hours_conversation(conversation, delta=0.0): \"\"\" Shows chart of average", "sender in {x.split('/')[2] for x in source.namelist() if (x.endswith('/') and x.startswith('messages/inbox/') and x", "send by hour throughout the day. :param difference: number of hours to time", "id) :param function: pandas function that returns requested time part :param delta: number", "Count messages?[y/n] ').lower() == 'y': count_messages() else: monthly_chats() if user_input[0] == 'yearly': if", "characters and saves to messages_chars.json') print(' stats [conversation, -c] - displays statistics for", "\"\"\" Prints statistics of given data source. 
import collections
import io
import json
import logging
import math
import zipfile
from urllib.error import URLError
from urllib.request import urlopen

import pandas as pd
from matplotlib import pyplot as plt


# Getting data
def set_source(filename):
    """
    Sets the source global variable to the path of the .zip file.

    :param filename: path to the downloaded .zip file
    :return: None

    You can provide a relative path to the file
    >>> set_source('facebook-YourName.zip')
    or an absolute path (works only on Windows)
    >>> set_source('C:/Users/Admin/Downloads/facebook-YourName.zip')
    """
    filename = f'file:///{filename}' if len(filename) > 1 and filename[1] == ':' \
        else (f'file:./{filename}' if filename.endswith('.zip') else f'file:./{filename}.zip')
    try:
        global source
        source = zipfile.ZipFile(io.BytesIO(urlopen(filename).read()))
    except URLError:
        logging.error('File not found, try again.')


def get_data(conversation=None, chars=False, user=False):
    """
    Reads data from messages.json or messages_chars.json and finds the key
    based on the beginning of the string.

    :param conversation: beginning of the conversation id or None for overall
                         statistics (default None)
    :param chars: True for counting chars in messages_chars.json, False for
                  counting messages in messages.json (default False)
    :param user: True for user name instead of conversation id, False
                 otherwise (default False)
    :return: data and, if applicable, a key pointing to a specific
             conversation, otherwise None
    """
    try:
        data = json.loads(open('messages_chars.json' if chars else 'messages.json',
                               'r', encoding='utf-8').read())
        if user:
            data = pd.DataFrame(data).fillna(0).astype('int')
            for key in data.index:
                if key.lower().startswith(conversation.lower()):
                    return data, key
            else:
                logging.error('Conversation not found.')
                return None, None
        if conversation is not None:
            for key in data.keys():
                if key.lower().startswith(conversation.lower()):
                    return data, key
            else:
                logging.error('Conversation not found.')
                return None, None
        else:
            return data, None
    except FileNotFoundError:
        logging.error('Characters not counted.' if chars else 'Messages not counted.')
        return None, None
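

# A minimal usage sketch (illustrative only, never called by this script):
# the archive name and the conversation id below are placeholders.
def _example_get_data_usage():
    """Sketch showing how set_source() and get_data() fit together."""
    set_source('facebook-YourName.zip')   # open the downloaded export
    data, key = get_data('JohnDoe')       # prefix-match a conversation id
    if key is not None:                   # key is None when nothing matched
        print(f'Matched conversation: {key}')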


# Counting messages and characters
def count_messages():
    """
    Counts messages and saves the output to messages.json.

    :return: None
    """
    namelist = source.namelist()
    total, senders = {}, {x.split('/')[2] for x in namelist
                          if (x.endswith('/') and x.startswith('messages/inbox/')
                              and x != 'messages/inbox/')}
    for sender in senders:
        messages, i = collections.Counter(), 0
        while True:
            try:
                i += 1
                messages += collections.Counter(pd.DataFrame(json.loads(
                    source.open('messages/inbox/' + sender + '/message_' + str(i)
                                + '.json').read())['messages']).iloc[:, 0])
            except KeyError:
                break
        total[sender] = {k.encode('iso-8859-1').decode('utf-8'): v
                         for k, v in messages.items()}
        total[sender]['total'] = sum(messages.values())
    with open('messages.json', 'w', encoding='utf-8') as output:
        json.dump(total, output, ensure_ascii=False)


def count_characters():
    """
    Counts characters from messages and saves the output to messages_chars.json.

    :return: None
    """
    namelist = source.namelist()
    total, senders = {}, {x.split('/')[2] for x in namelist
                          if (x.endswith('/') and x.startswith('messages/inbox/')
                              and x != 'messages/inbox/')}
    for sender in senders:
        counted_all, i = collections.Counter(), 0
        while True:
            try:
                i += 1
                frame = pd.DataFrame(json.loads(
                    source.open('messages/inbox/' + sender + '/message_' + str(i)
                                + '.json').read())['messages'])
                frame['counted'] = frame.apply(
                    lambda row: collections.Counter(
                        str(row['content']).encode('iso-8859-1').decode('utf-8')),
                    axis=1)
                counted_all += sum(frame['counted'], collections.Counter())
            except KeyError:
                break
        total[sender] = dict(counted_all)
    with open('messages_chars.json', 'w', encoding='utf-8') as output:
        json.dump(total, output, ensure_ascii=False)


def count(chars=False):
    """
    Counts messages or characters from messages and saves the output to the file.

    :param chars: True for counting characters, False for counting messages
                  (default False)
    :return: None
    """
    if chars:
        count_characters()
    else:
        count_messages()
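

# For reference, count_messages() above produces a messages.json of roughly
# this shape (names and numbers are made-up examples):
#
#     {"johndoe_abc123": {"John Doe": 1234, "Your Name": 987, "total": 2221}}
#
# Each top-level key is a conversation directory name from the export; each
# value maps a participant's display name to their message count, plus the
# "total" sum used by messages_statistics().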


# Statistics
def statistics(data_source, conversation=None, chars=False):
    """
    Prints statistics of the given data source.

    :param data_source: dictionary containing prepared data generated by
                        the get_data() function
    :param conversation: conversation id or None for overall statistics
                         (default None)
    :param chars: True for character statistics instead of messages, False
                  otherwise (default False)
    :return: None
    """
    if conversation is None:
        if chars:
            characters_statistics(data_source)
        else:
            messages_statistics(data_source)
    else:
        if chars:
            raise NotImplementedError()
        else:
            print(conversation)
            conversation_statistics(data_source, conversation)


def messages_statistics(data_source):
    """
    Prints overall messages statistics of the given data source.

    :param data_source: dictionary containing prepared data generated by
                        the get_data() function
    :return: None
    """
    data_source = pd.DataFrame(data_source).fillna(0).astype('int')
    pd.set_option('display.max_rows', None)
    total_values = data_source.loc['total'].sort_values(ascending=False)
    print(total_values)
    print(total_values.describe())
    total_values = total_values.sort_values()
    plt.rcdefaults()
    plt.barh(total_values.index.astype(str).str[:10][-20:], total_values.iloc[-20:])
    plt.show()


def conversation_statistics(data_source, conversation):
    """
    Prints messages statistics for a specific conversation of the given data source.

    :param data_source: dictionary containing prepared data generated by
                        the get_data() function
    :param conversation: conversation id, or key from the get_data() function
    :return: None
    """
    data_source = pd.DataFrame(data_source)
    data_source = data_source.loc[:, conversation]
    data_source = data_source[data_source > 0].sort_values(ascending=False).astype('int')
    pd.set_option('display.max_rows', None)
    print(data_source)


def characters_statistics(data_source):
    """
    Prints characters statistics of the given data source.

    :param data_source: dictionary containing prepared data generated by
                        the get_data() function
    :return: None
    """
    data_source = pd.DataFrame(data_source)
    data_source['total'] = data_source.sum(axis=1)
    data_source = data_source.iloc[:, -1]
    data_source = data_source.sort_values(ascending=False).astype('int')
    pd.set_option('display.max_rows', None)
    print(data_source)
    print(f'Total characters: {data_source.sum()}')


# TODO characters conversation statistics
def characters_conversation_statistics(data_source, conversation):
    """
    Prints characters statistics for a specific conversation of the given data source.

    :param data_source: dictionary containing prepared data generated by
                        the get_data() function
    :param conversation: conversation id, or key from the get_data() function
    :return: None
    """
    pass


# User statistics
def user_statistics(data_source, user_name):
    """
    Prints detailed statistics for a specific person of the given data source.

    :param data_source: dictionary containing prepared data generated by
                        the get_data() function
    :param user_name: person name, or key from the get_data() function
    :return: None
    """
    data_source = data_source.loc[user_name]
    data_source = data_source[data_source > 0].sort_values(ascending=False)
    data_source.index = data_source.index.map(lambda x: x.split('_')[0][:30])
    pd.set_option('display.max_rows', None)
    print(user_name, 'statistics:')
    print(data_source)
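

# A minimal wiring sketch (illustrative only; 'JohnDoe' is a placeholder and
# the counted .json files are assumed to exist already):
def _example_statistics_usage():
    """Sketch showing how get_data() feeds the statistics() dispatcher."""
    data, key = get_data('JohnDoe')
    if data is not None:
        statistics(data, key)          # per-conversation message statistics
    data, _ = get_data(chars=True)
    if data is not None:
        statistics(data, chars=True)   # overall character statistics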


# Intervals
def interval_count(inbox_name, function, delta=0.0):
    """
    Counts the number of messages based on the given timeframe function.

    :param inbox_name: directory name that contains requested messages
                       (usually the conversation id)
    :param function: pandas function that returns the requested time part
    :param delta: number of hours to time shift by and count messages
                  differently (default 0.0)
    :return: dictionary of the number of messages grouped by timeframe
    """
    messages, i = collections.Counter(), 0
    while True:
        try:
            i += 1
            # iterates over all .json files in the requested directory
            messages += collections.Counter(function(
                pd.to_datetime(pd.DataFrame(json.loads(
                    source.open('messages/inbox/' + inbox_name + '/message_'
                                + str(i) + '.json').read())['messages']).iloc[:, 1],
                               unit='ms')
                .dt.tz_localize('UTC').dt.tz_convert('Europe/Warsaw')
                .add(pd.Timedelta(hours=-delta))))
        except KeyError:
            break
    return messages


def interval_plot(messages):
    """
    Shows a chart based on the previously defined timeframe.

    :param messages: dictionary of the number of messages grouped by timeframe
    :return: None
    """
    messages = pd.Series(messages).sort_index()
    print(messages.describe())
    plt.bar(messages.index, messages)
    plt.savefig('messages.pdf')
    plt.show()


# Hours
def hours(difference, conversation=None):
    """
    Shows a chart of the average number of messages sent by hour throughout the day.

    :param difference: number of hours to time shift by and show statistics differently
    :param conversation: conversation id or None for statistics from all
                         conversations (default None)
    :return: None
    """
    if conversation is None:
        hours_chats(difference)
    else:
        data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
        for key in data.keys():
            if key.lower().startswith(conversation.lower()):
                hours_conversation(key, difference)
                break
        else:
            print('Conversation not found.')


def hours_conversation(conversation, delta=0.0):
    """
    Shows a chart of the average number of messages sent in a specific
    conversation by hour throughout the day.

    :param conversation: conversation id, or key from the get_data() function
    :param delta: number of hours to time shift by and show statistics
                  differently (default 0.0)
    :return: None
    """
    hours_plot(interval_count(conversation, lambda x: x.dt.hour, delta), delta)


def hours_chats(delta=0.0):
    """
    Shows a chart of the average number of messages sent across all
    conversations by hour throughout the day.

    :param delta: number of hours to time shift by and show statistics
                  differently (default 0.0)
    :return: None
    """
    messages = collections.Counter()
    for sender in {x.split('/')[2] for x in source.namelist()
                   if (x.endswith('/') and x.startswith('messages/inbox/')
                       and x != 'messages/inbox/')}:
        messages += interval_count(sender, lambda x: x.dt.hour, delta)
    hours_plot(messages, delta)


def hours_plot(messages, delta):
    """
    Shows a chart of the average number of messages grouped by hour
    throughout the day.

    :param messages: dictionary of the number of messages grouped by timeframe
    :param delta: number of hours to time shift by and show statistics differently
    :return: None
    """
    messages = pd.DataFrame(messages, index=[0])
    print(messages.iloc[0].describe())
    plt.bar(messages.columns, messages.iloc[0])
    plt.xticks(list(range(24)),
               [f'{x % 24}:{int(abs((delta - int(delta)) * 60)):02}'
                for x in range(-(-math.floor(delta) % 24),
                               math.floor(delta) % 24
                               if math.floor(delta) % 24 != 0 else 24)],
               rotation=90)
    plt.xlim(-1, 24)
    plt.savefig('messages.pdf')
    plt.show()
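

# Worked example of the tick-label arithmetic in hours_plot() above: for
# delta=1.5 (a 1 h 30 min shift), math.floor(delta) is 1, so the label loop
# runs over range(-23, 1) and produces the 24 labels
# '1:30', '2:30', ..., '23:30', '0:30', matching the shift applied by
# interval_count() through pd.Timedelta(hours=-delta).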


# Daily
def daily(difference, conversation=None):
    """
    Shows a chart of the number of messages per day.

    :param difference: number of hours to time shift by and show statistics differently
    :param conversation: conversation id or None for statistics from all
                         conversations (default None)
    :return: None
    """
    if conversation is None:
        daily_chats(difference)
    else:
        data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
        for key in data.keys():
            if key.lower().startswith(conversation.lower()):
                daily_conversation(key, difference)
                break
        else:
            print('Conversation not found.')


def daily_conversation(conversation, delta=0.0):
    """
    Shows a chart of the number of messages per day from the beginning
    of the conversation.

    :param conversation: conversation id, or key from the get_data() function
    :param delta: number of hours to time shift by and show statistics
                  differently (default 0.0)
    :return: None
    """
    interval_plot(interval_count(conversation, lambda x: x.dt.date, delta))


def daily_chats(delta=0.0):
    """
    Shows a chart of the number of messages per day across all conversations.

    :param delta: number of hours to time shift by and show statistics
                  differently (default 0.0)
    :return: None
    """
    messages = collections.Counter()
    for sender in {x.split('/')[2] for x in source.namelist()
                   if (x.endswith('/') and x.startswith('messages/inbox/')
                       and x != 'messages/inbox/')}:
        messages += interval_count(sender, lambda x: x.dt.date, delta)
    interval_plot(messages)


# Monthly (not working)
def monthly_conversation(conversation):  # TODO not working charts for monthly
    """
    Shows a chart of the number of messages per month.

    :param conversation: conversation id or None for statistics from all
                         conversations (default None)
    :return: None
    """
    interval_plot(interval_count(conversation,
                                 lambda x: x.dt.to_period("M").astype('datetime64[ns]')))


def monthly_chats():
    """
    Shows a chart of the number of messages per month across all conversations.

    :return: None
    """
    messages = collections.Counter()
    for sender in {x.split('/')[2] for x in source.namelist()
                   if (x.endswith('/') and x.startswith('messages/inbox/')
                       and x != 'messages/inbox/')}:
        messages += interval_count(sender,
                                   lambda x: x.dt.to_period("M").astype('datetime64[ns]'))
    interval_plot(messages)


# Yearly
def yearly(conversation=None):
    """
    Shows a chart of the number of messages per year.

    :param conversation: conversation id or None for statistics from all
                         conversations (default None)
    :return: None
    """
    if conversation is None:
        yearly_chats()
    else:
        data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
        for key in data.keys():
            if key.lower().startswith(conversation.lower()):
                yearly_conversation(key)
                break
        else:
            print('Conversation not found.')


def yearly_conversation(conversation):
    """
    Shows a chart of the number of messages per year from the beginning
    of the conversation.

    :param conversation: conversation id, or key from the get_data() function
    :return: None
    """
    interval_plot(interval_count(conversation, lambda x: x.dt.year))


def yearly_chats():
    """
    Shows a chart of the number of messages per year across all conversations.

    :return: None
    """
    messages = collections.Counter()
    for sender in {x.split('/')[2] for x in source.namelist()
                   if (x.endswith('/') and x.startswith('messages/inbox/')
                       and x != 'messages/inbox/')}:
        messages += interval_count(sender, lambda x: x.dt.year)
    messages = pd.DataFrame(messages, index=[0])
    print(messages.iloc[0].describe())
    plt.bar(messages.columns, messages.iloc[0])
    plt.savefig('messages.pdf')
    plt.show()
Count messages?[y/n] ').lower() == 'y': count_messages() else: print('Please specify user name.')", "= data_source.loc[user_name] data_source = data_source[data_source > 0].sort_values(ascending=False) data_source.index = data_source.index.map(lambda x: x.split('_')[0][:30]) pd.set_option('display.max_rows',", "show statistics differently (default 0.0) :return: None \"\"\" messages = collections.Counter() for sender", "{}, {x.split('/')[2] for x in namelist if (x.endswith('/') and x.startswith('messages/inbox/') and x !=", "(x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')} for sender in senders: counted_all, i", "math.floor(delta) % 24 != 0 else 24)], rotation=90) plt.xlim(-1, 24) plt.savefig('messages.pdf') plt.show() #", "!= 'messages/inbox/')}: messages += interval_count(sender, lambda x: x.dt.year) messages = pd.DataFrame(messages, index=[0]) print(messages.iloc[0].describe())", "all conversations (default None) :return: None \"\"\" interval_plot(interval_count(conversation, lambda x: x.dt.to_period(\"M\").astype('datetime64[ns]'))) def monthly_chats():", "timeframe :return: None \"\"\" messages = pd.Series(messages).sort_index() print(messages.describe()) plt.bar(messages.index, messages) plt.savefig('messages.pdf') plt.show() #", "'messages/inbox/')}: messages += interval_count(sender, lambda x: x.dt.hour, delta) hours_plot(messages, delta) def hours_plot(messages, delta):", "True: try: i += 1 messages += collections.Counter(pd.DataFrame(json.loads( source.open('messages/inbox/' + sender + '/message_'", "or None for overall statistics (default None) :param chars: True for counting chars", "json.loads(open('messages.json', 'r', encoding='utf-8').read()) messages_statistics(data) except FileNotFoundError: if input('Messages not counted. Count messages?[y/n] ').lower()", "day. :param messages: dictionary of number of messages grouped by timeframe :param delta:", "') filename = f'file:///{filename}' if filename[1] == ':'\\ else (f'file:./{filename}' if filename.endswith('.zip') else", "None: for key in data.keys(): if key.lower().startswith(conversation.lower()): return data, key else: logging.error('Conversation not", "time shift by and show statistics differently :param conversation: conversation id or None", "input('Messages not counted. Count messages?[y/n] ').lower() == 'y': count_messages() else: print('Please specify user", "'.json').read())[ 'messages']).iloc[:, 1], unit='ms').dt.tz_localize('UTC').dt.tz_convert( 'Europe/Warsaw').add(pd.Timedelta(hours=-delta)))) except KeyError: break return messages def interval_plot(messages): \"\"\"", "if len(user_input) > 1 and not user_input[1] == '-h': try: data = json.loads(open('messages.json',", "i = collections.Counter(), 0 while True: try: i += 1 # iterates over", "encoding='utf-8').read()) for key in data.keys(): if key.lower().startswith(conversation.lower()): daily_conversation(key, difference) break else: print('Conversation not", "string. 
:param conversation: beginning of the conversation id or None for overall statistics", ":param conversation: conversation id, or key from get_data() function :param delta: number of", "requested time part :param delta: number of hours to time shift by and", "prepared data generated by the get_data() function :param user_name: person name, or key", "'.json').read())['messages']) frame['counted'] = frame.apply( lambda row: collections.Counter(str(row['content']).encode('iso-8859-1').decode('utf-8')), axis=1) counted_all += sum(frame['counted'], collections.Counter()) except", "__name__=='__main__': while True: filename = input('Enter filename: ') filename = f'file:///{filename}' if filename[1]", "for statistics from all conversations (default None) :return: None \"\"\" interval_plot(interval_count(conversation, lambda x:", "hour distribution of messages') print(' [specific user, hours difference]') print(' help - displays", "for x in namelist if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')} for", "of average number of messages send across all conversations by hour throughout the", "all conversation. :return: None \"\"\" messages = collections.Counter() for sender in {x.split('/')[2] for", "'y': count_messages() else: yearly_chats() if user_input[0] == 'hours': if len(user_input) > 1 and", "def yearly_conversation(conversation): \"\"\" Shows chart of number of messages per year from the", "part :param delta: number of hours to time shift by and count messages", "to messages.json') print(' chars - counts all characters and saves to messages_chars.json') print('", "\"\"\" if conversation is None: if chars: characters_statistics(data_source) else: messages_statistics(data_source) else: if chars:", "= json.loads(open('messages.json', 'r', encoding='utf-8').read()) for key in data.keys(): if key.lower().startswith(conversation.lower()): yearly_conversation(key) break else:", "'r', encoding='utf-8').read()) if len(user_input) > 1: for key in data.keys(): if key.startswith(user_input[1]): yearly_conversation(key)", "for key in data.keys(): if key.lower().startswith(conversation.lower()): hours_conversation(key, difference) break else: print('Conversation not found.')", "not found.') return None, None else: return data, None except FileNotFoundError: logging.error('Characters not", "statistics differently :param conversation: conversation id or None for statistics from all conversations", "[name] - yearly messages') print(' [specific user]') # print(' monthly [name, -d] -", "').lower() == 'y': count_messages() elif len(user_input) > 1 and user_input[1] == '-c': try:", "user: data = pd.DataFrame(data).fillna(0).astype('int') for key in data.index: if key.lower().startswith(conversation.lower()): return data, key", "found.') except FileNotFoundError: if input('Characters not counted. Count characters?[y/n] ').lower() == 'y': count_characters()", "saves output to messages_chars.json. :return: None \"\"\" namelist = source.namelist() total, senders =", "def hours_chats(delta=0.0): \"\"\" Shows chart of average number of messages send across all", "messages = pd.DataFrame(messages, index=[0]) print(messages.iloc[0].describe()) plt.bar(messages.columns, messages.iloc[0]) plt.xticks(list(range(24)), [f'{x % 24}:{int(abs((delta - int(delta))", "if input('Messages not counted. 
Count messages?[y/n] ').lower() == 'y': count_messages() if user_input[0] ==", "\"\"\" pass # User statistics def user_statistics(data_source, user_name): \"\"\" Prints detailed statistics for", "else: print('Conversation not found.') def hours_conversation(conversation, delta=0.0): \"\"\" Shows chart of average number", "key in data.keys(): if key.lower().startswith(conversation.lower()): daily_conversation(key, difference) break else: print('Conversation not found.') def", "None for statistics from all conversations (default None) :return: None \"\"\" if conversation", "print('Conversation not found.') except FileNotFoundError: if input('Messages not counted. Count messages?[y/n] ').lower() ==", "x.split('_')[0][:30]) pd.set_option('display.max_rows', None) print(user_name, 'statistics:') print(data_source) # Intervals def interval_count(inbox_name, function, delta=0.0): \"\"\"", "conversation id, or key from get_data() function :return: None \"\"\" interval_plot(interval_count(conversation, lambda x:", "number of messages grouped by timeframe \"\"\" messages, i = collections.Counter(), 0 while", "across all conversations by hour throughout the day. :param delta: number of hours", "statistics for counted messages') print(' [detailed statistics for specific conversation, character statistics]') print('", "(default None) :param chars: True for character statistics instead of messages, False otherwise", "in data.keys(): if key.startswith(user_input[1]): characters_conversation_statistics(data, key) break else: print('Conversation not found.') except FileNotFoundError:", "user_input[1] == '-h': daily_chats(float(user_input[2])) else: daily_chats() if user_input[0] == 'monthly': if len(user_input) >", "chart of average number of messages grouped by hour throughout the day. :param", "!= 'messages/inbox/')}: messages += interval_count(sender, lambda x: x.dt.date, delta) interval_plot(messages) # Monthly (not", "- counts all messages and saves to messages.json') print(' chars - counts all", "not found, try again.') def get_data(conversation=None, chars=False, user=False): \"\"\" Reads data from messages.json", "conversations by hour throughout the day. :param delta: number of hours to time", "print(' [specific user, day difference]') print(' daily [name, -h] - daily messages') print('", "the downloaded .zip file :return: None You can provide relative path to file", "data_source = pd.DataFrame(data_source) data_source['total'] = data_source.sum(axis=1) data_source = data_source.iloc[:, -1] data_source = data_source.sort_values(ascending=False).astype('int')", "else: monthly_chats() if user_input[0] == 'yearly': if len(user_input) > 1: try: data =", "logging.error('Conversation not found.') return None, None else: return data, None except FileNotFoundError: logging.error('Characters", "and show statistics differently (default 0.0) :return: None \"\"\" hours_plot(interval_count(conversation, lambda x: x.dt.hour,", "data = json.loads(open('messages.json', 'r', encoding='utf-8').read()) for key in data.keys(): if key.lower().startswith(conversation.lower()): yearly_conversation(key) break", "or None for statistics from all conversations (default None) :return: None \"\"\" interval_plot(interval_count(conversation,", "FileNotFoundError: logging.error('Characters not counted.' 

# Counting messages and characters
def count_messages():
    """
    Counts messages and saves output to messages.json.
    :return: None
    """
    namelist = source.namelist()
    total, senders = {}, {x.split('/')[2] for x in namelist
                          if (x.endswith('/') and x.startswith('messages/inbox/')
                              and x != 'messages/inbox/')}
    for sender in senders:
        messages, i = collections.Counter(), 0
        while True:
            try:
                i += 1
                messages += collections.Counter(pd.DataFrame(json.loads(
                    source.open('messages/inbox/' + sender + '/message_'
                                + str(i) + '.json').read())['messages']).iloc[:, 0])
            except KeyError:
                break
        total[sender] = {k.encode('iso-8859-1').decode('utf-8'): v
                         for k, v in messages.items()}
        total[sender]['total'] = sum(messages.values())
    with open('messages.json', 'w', encoding='utf-8') as output:
        json.dump(total, output, ensure_ascii=False)


def count_characters():
    """
    Counts characters from messages and saves output to messages_chars.json.
    :return: None
    """
    namelist = source.namelist()
    total, senders = {}, {x.split('/')[2] for x in namelist
                          if (x.endswith('/') and x.startswith('messages/inbox/')
                              and x != 'messages/inbox/')}
    for sender in senders:
        counted_all, i = collections.Counter(), 0
        while True:
            try:
                i += 1
                frame = pd.DataFrame(json.loads(source.open(
                    'messages/inbox/' + sender + '/message_' + str(i)
                    + '.json').read())['messages'])
                frame['counted'] = frame.apply(
                    lambda row: collections.Counter(
                        str(row['content']).encode('iso-8859-1').decode('utf-8')),
                    axis=1)
                counted_all += sum(frame['counted'], collections.Counter())
            except KeyError:
                break
        total[sender] = dict(counted_all)
    with open('messages_chars.json', 'w', encoding='utf-8') as output:
        json.dump(total, output, ensure_ascii=False)


def count(chars=False):
    """
    Counts messages or characters from messages and saves output to the file.
    :param chars: True for counting characters, False for counting messages
                  (default False)
    :return: None
    """
    if chars:
        count_characters()
    else:
        count_messages()
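
# Sketch of the resulting files (shapes inferred from the counting code above;
# the conversation id 'johnsmith_abc123' is made up). messages.json maps each
# conversation directory to per-sender message counts plus a 'total' field:
#
#   {"johnsmith_abc123": {"John Smith": 1534, "Your Name": 1287, "total": 2821}}
#
# messages_chars.json maps each conversation to per-character counts instead.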

# Statistics
def statistics(data_source, conversation=None, chars=False):
    """
    Prints statistics of given data source.
    :param data_source: dictionary containing prepared data generated by
                        the get_data() function
    :param conversation: conversation id or None for overall statistics
                         (default None)
    :param chars: True for character statistics instead of messages,
                  False otherwise (default False)
    :return: None
    """
    if conversation is None:
        if chars:
            characters_statistics(data_source)
        else:
            messages_statistics(data_source)
    else:
        if chars:
            raise NotImplementedError()
        else:
            print(conversation)
            conversation_statistics(data_source, conversation)


def messages_statistics(data_source):
    """
    Prints messages overall statistics of given data source.
    :param data_source: dictionary containing prepared data generated by
                        the get_data() function
    :return: None
    """
    data_source = pd.DataFrame(data_source).fillna(0).astype('int')
    pd.set_option('display.max_rows', None)
    total_values = data_source.loc['total'].sort_values(ascending=False)
    print(total_values)
    print(total_values.describe())
    total_values = total_values.sort_values()
    plt.rcdefaults()
    plt.barh(total_values.index.astype(str).str[:10][-20:], total_values.iloc[-20:])
    plt.show()


def conversation_statistics(data_source, conversation):
    """
    Prints messages statistics for specific conversation of given data source.
    :param data_source: dictionary containing prepared data generated by
                        the get_data() function
    :param conversation: conversation id, or key from get_data() function
    :return: None
    """
    data_source = pd.DataFrame(data_source)
    data_source = data_source.loc[:, conversation]
    data_source = data_source[data_source > 0].sort_values(ascending=False).astype('int')
    pd.set_option('display.max_rows', None)
    print(data_source)


def characters_statistics(data_source):
    """
    Prints characters statistics of given data source.
    :param data_source: dictionary containing prepared data generated by
                        the get_data() function
    :return: None
    """
    data_source = pd.DataFrame(data_source)
    data_source['total'] = data_source.sum(axis=1)
    data_source = data_source.iloc[:, -1]
    data_source = data_source.sort_values(ascending=False).astype('int')
    pd.set_option('display.max_rows', None)
    print(data_source)
    print(f'Total characters: {data_source.sum()}')


# TODO characters conversation statistics
def characters_conversation_statistics(data_source, conversation):
    """
    Prints characters statistics for specific conversation of given data source.
    :param data_source: dictionary containing prepared data generated by
                        the get_data() function
    :param conversation: conversation id, or key from get_data() function
    :return: None
    """
    pass


# User statistics
def user_statistics(data_source, user_name):
    """
    Prints detailed statistics for specific person of given data source.
    :param data_source: dictionary containing prepared data generated by
                        the get_data() function
    :param user_name: person name, or key from get_data() function
    :return: None
    """
    data_source = data_source.loc[user_name]
    data_source = data_source[data_source > 0].sort_values(ascending=False)
    data_source.index = data_source.index.map(lambda x: x.split('_')[0][:30])
    pd.set_option('display.max_rows', None)
    print(user_name, 'statistics:')
    print(data_source)
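
# Example (illustrative): print a ranked table of all chats, then drill into
# one person. get_data(user=True) pivots messages.json into a DataFrame indexed
# by sender name, which is the shape user_statistics() expects:
#
#   data, _ = get_data()
#   messages_statistics(data)               # table plus top-20 bar chart
#   frame, key = get_data('John', user=True)
#   if key is not None:
#       user_statistics(frame, key)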

# Intervals
def interval_count(inbox_name, function, delta=0.0):
    """
    Counts number of messages based on given timeframe function.
    :param inbox_name: directory name that contains requested messages
                       (usually conversation id)
    :param function: pandas function that returns requested time part
    :param delta: number of hours to time shift by and count messages
                  differently (default 0.0)
    :return: dictionary of number of messages grouped by timeframe
    """
    messages, i = collections.Counter(), 0
    while True:
        try:
            i += 1
            # iterates over all .json files in requested directory
            messages += collections.Counter(function(pd.to_datetime(pd.DataFrame(
                json.loads(source.open('messages/inbox/' + inbox_name + '/message_'
                                       + str(i) + '.json').read())['messages']
            ).iloc[:, 1], unit='ms').dt.tz_localize('UTC').dt.tz_convert(
                'Europe/Warsaw').add(pd.Timedelta(hours=-delta))))
        except KeyError:
            break
    return messages


def interval_plot(messages):
    """
    Shows chart based on previously defined timeframe.
    :param messages: dictionary of number of messages grouped by timeframe
    :return: None
    """
    messages = pd.Series(messages).sort_index()
    print(messages.describe())
    plt.bar(messages.index, messages)
    plt.savefig('messages.pdf')
    plt.show()
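
# How the delta shift works (worked example): timestamps are parsed from
# milliseconds, localized to UTC, converted to Europe/Warsaw, then shifted back
# by `delta` hours before grouping. With delta=6.0, a message sent at 02:30
# local time is counted as 20:30 of the previous day, so late-night chatting
# stays attached to the evening it belongs to. The timezone is hard-coded;
# adjust 'Europe/Warsaw' for other locales.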

# Hours
def hours(difference, conversation=None):
    """
    Shows chart of average number of messages sent by hour throughout the day.
    :param difference: number of hours to time shift by and show statistics
                       differently
    :param conversation: conversation id or None for statistics from all
                         conversations (default None)
    :return: None
    """
    if conversation is None:
        hours_chats(difference)
    else:
        data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
        for key in data.keys():
            if key.lower().startswith(conversation.lower()):
                hours_conversation(key, difference)
                break
        else:
            print('Conversation not found.')


def hours_conversation(conversation, delta=0.0):
    """
    Shows chart of average number of messages sent in specific conversation
    by hour throughout the day.
    :param conversation: conversation id, or key from get_data() function
    :param delta: number of hours to time shift by and show statistics
                  differently (default 0.0)
    :return: None
    """
    hours_plot(interval_count(conversation, lambda x: x.dt.hour, delta), delta)


def hours_chats(delta=0.0):
    """
    Shows chart of average number of messages sent across all conversations
    by hour throughout the day.
    :param delta: number of hours to time shift by and show statistics
                  differently (default 0.0)
    :return: None
    """
    messages = collections.Counter()
    for sender in {x.split('/')[2] for x in source.namelist()
                   if (x.endswith('/') and x.startswith('messages/inbox/')
                       and x != 'messages/inbox/')}:
        messages += interval_count(sender, lambda x: x.dt.hour, delta)
    hours_plot(messages, delta)


def hours_plot(messages, delta):
    """
    Shows chart of average number of messages grouped by hour throughout
    the day.
    :param messages: dictionary of number of messages grouped by timeframe
    :param delta: number of hours to time shift by and show statistics
                  differently
    :return: None
    """
    messages = pd.DataFrame(messages, index=[0])
    print(messages.iloc[0].describe())
    plt.bar(messages.columns, messages.iloc[0])
    plt.xticks(list(range(24)),
               [f'{x % 24}:{int(abs((delta - int(delta)) * 60)):02}'
                for x in range(-(-math.floor(delta) % 24),
                               math.floor(delta) % 24
                               if math.floor(delta) % 24 != 0 else 24)],
               rotation=90)
    plt.xlim(-1, 24)
    plt.savefig('messages.pdf')
    plt.show()
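
# Tick-label sketch for hours_plot(): with delta=0 the 24 labels are simply
# '0:00' ... '23:00'. With delta=1.5 the fractional part becomes the minutes,
# so the labels read '1:30', '2:30', ..., '23:30', '0:30' -- each bar is an
# hour-of-day bucket after the 1.5-hour shift applied in interval_count().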

# Daily
def daily(difference, conversation=None):
    """
    Shows chart of number of messages per day.
    :param difference: number of hours to time shift by and show statistics
                       differently
    :param conversation: conversation id or None for statistics from all
                         conversations (default None)
    :return: None
    """
    if conversation is None:
        daily_chats(difference)
    else:
        data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
        for key in data.keys():
            if key.lower().startswith(conversation.lower()):
                daily_conversation(key, difference)
                break
        else:
            print('Conversation not found.')


def daily_conversation(conversation, delta=0.0):
    """
    Shows chart of number of messages per day from the beginning of
    the conversation.
    :param conversation: conversation id, or key from get_data() function
    :param delta: number of hours to time shift by and show statistics
                  differently (default 0.0)
    :return: None
    """
    interval_plot(interval_count(conversation, lambda x: x.dt.date, delta))


def daily_chats(delta=0.0):
    """
    Shows chart of number of messages per day across all conversations.
    :param delta: number of hours to time shift by and show statistics
                  differently (default 0.0)
    :return: None
    """
    messages = collections.Counter()
    for sender in {x.split('/')[2] for x in source.namelist()
                   if (x.endswith('/') and x.startswith('messages/inbox/')
                       and x != 'messages/inbox/')}:
        messages += interval_count(sender, lambda x: x.dt.date, delta)
    interval_plot(messages)


# Monthly (not working)
def monthly_conversation(conversation):  # TODO not working charts for monthly
    """
    Shows chart of number of messages per month.
    :param conversation: conversation id, or key from get_data() function
    :return: None
    """
    interval_plot(interval_count(
        conversation, lambda x: x.dt.to_period("M").astype('datetime64[ns]')))


def monthly_chats():
    """
    Shows chart of number of messages per month across all conversations.
    :return: None
    """
    messages = collections.Counter()
    for sender in {x.split('/')[2] for x in source.namelist()
                   if (x.endswith('/') and x.startswith('messages/inbox/')
                       and x != 'messages/inbox/')}:
        messages += interval_count(
            sender, lambda x: x.dt.to_period("M").astype('datetime64[ns]'))
    interval_plot(messages)
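
# Grouping-lambda summary: interval_count() applies the given function to the
# parsed datetimes, so each chart family just picks a different bucket:
#
#   lambda x: x.dt.hour                                      # hours charts
#   lambda x: x.dt.date                                      # daily charts
#   lambda x: x.dt.to_period("M").astype('datetime64[ns]')   # monthly (TODO)
#   lambda x: x.dt.year                                      # yearly charts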

# Yearly
def yearly(conversation=None):
    """
    Shows chart of number of messages per year.
    :param conversation: conversation id or None for statistics from all
                         conversations (default None)
    :return: None
    """
    if conversation is None:
        yearly_chats()
    else:
        data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
        for key in data.keys():
            if key.lower().startswith(conversation.lower()):
                yearly_conversation(key)
                break
        else:
            print('Conversation not found.')


def yearly_conversation(conversation):
    """
    Shows chart of number of messages per year from the beginning of
    the conversation.
    :param conversation: conversation id, or key from get_data() function
    :return: None
    """
    interval_plot(interval_count(conversation, lambda x: x.dt.year))


def yearly_chats():
    """
    Shows chart of number of messages per year across all conversations.
    :return: None
    """
    messages = collections.Counter()
    for sender in {x.split('/')[2] for x in source.namelist()
                   if (x.endswith('/') and x.startswith('messages/inbox/')
                       and x != 'messages/inbox/')}:
        messages += interval_count(sender, lambda x: x.dt.year)
    messages = pd.DataFrame(messages, index=[0])
    print(messages.iloc[0].describe())
    plt.bar(messages.columns, messages.iloc[0])
    plt.savefig('messages.pdf')
    plt.show()
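
# Example (illustrative conversation prefix): yearly('john') looks up the
# first key in messages.json starting with 'john' and plots one bar per
# calendar year for that chat; yearly() with no argument aggregates every
# conversation in the archive via yearly_chats().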

if __name__ == '__main__':
    while True:
        filename = input('Enter filename: ')
        filename = f'file:///{filename}' if filename[1] == ':' \
            else (f'file:./{filename}' if filename.endswith('.zip')
                  else f'file:./{filename}.zip')
        try:
            source = zipfile.ZipFile(io.BytesIO(urlopen(filename).read()))
            break
        except URLError:
            print('File not found, try again.')
    while True:
        user_input = input('>').split(' ')
        if user_input[0] == 'exit':
            break
        if user_input[0] == '' or user_input[0] == 'count':
            count_messages()
        if user_input[0] == 'chars':
            count_characters()
        if user_input[0] == 'help' or user_input[0] == '?':
            print('Messenger Counter available commands:')
            print('  count - counts all messages and saves to messages.json')
            print('  chars - counts all characters and saves to messages_chars.json')
            print('  stats [conversation, -c] - displays statistics for counted messages')
            print('         [detailed statistics for specific conversation, character statistics]')
            print('  user [name] - detailed statistics for specific user')
            print('  yearly [name] - yearly messages')
            print('         [specific user]')
            # print('  monthly [name, -d] - monthly messages (available soon)')
            # print('         [specific user, day difference]')
            print('  daily [name, -h] - daily messages')
            print('         [specific user, hours difference]')
            print('  hours [name, -h] - hour distribution of messages')
            print('         [specific user, hours difference]')
            print('  help - displays this help prompt')
            print('  exit - exits the program')
        if user_input[0] == 'stats':
            if len(user_input) > 2 and user_input[2] == '-c':
                try:
                    data = json.loads(open('messages_chars.json', 'r', encoding='utf-8').read())
                    for key in data.keys():
                        if key.startswith(user_input[1]):
                            characters_conversation_statistics(data, key)
                            break
                    else:
                        print('Conversation not found.')
                except FileNotFoundError:
                    if input('Characters not counted. Count characters?[y/n] ').lower() == 'y':
                        count_characters()
            elif len(user_input) > 1 and not user_input[1] == '-c':
                try:
                    data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
                    for key in data.keys():
                        if key.startswith(user_input[1]):
                            conversation_statistics(data, key)
                            break
                    else:
                        print('Conversation not found.')
                except FileNotFoundError:
                    if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
                        count_messages()
            elif len(user_input) > 1 and user_input[1] == '-c':
                try:
                    data = json.loads(open('messages_chars.json', 'r', encoding='utf-8').read())
                    characters_statistics(data)
                except FileNotFoundError:
                    if input('Characters not counted. Count characters?[y/n] ').lower() == 'y':
                        count_characters()
            else:
                try:
                    data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
                    messages_statistics(data)
                except FileNotFoundError:
                    if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
                        count_messages()
        if user_input[0] == 'user':
            if len(user_input) > 1:
                try:
                    data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
                    data = pd.DataFrame(data).fillna(0).astype('int')
                    for key in data.index:
                        if key.startswith(' '.join(user_input[1:])):
                            user_statistics(data, key)
                            break
                    else:
                        print('Conversation not found.')
                except FileNotFoundError:
                    if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
                        count_messages()
            else:
                print('Please specify user name.')
        if user_input[0] == 'daily':
            if len(user_input) > 1 and not user_input[1] == '-h':
                try:
                    data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
                    for key in data.keys():
                        if key.startswith(user_input[1]):
                            if len(user_input) < 3:
                                daily_conversation(key)
                            else:
                                daily_conversation(key, float(user_input[2]))
                            break
                    else:
                        print('Conversation not found.')
                except FileNotFoundError:
                    if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
                        count_messages()
            elif len(user_input) > 1 and user_input[1] == '-h':
                daily_chats(float(user_input[2]))
            else:
                daily_chats()
        if user_input[0] == 'monthly':
            if len(user_input) > 1:
                try:
                    data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
                    for key in data.keys():
                        if key.startswith(user_input[1]):
                            monthly_conversation(key)
                            break
                    else:
                        print('Conversation not found.')
                except FileNotFoundError:
                    if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
                        count_messages()
            else:
                monthly_chats()
        if user_input[0] == 'yearly':
            if len(user_input) > 1:
                try:
                    data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
                    for key in data.keys():
                        if key.startswith(user_input[1]):
                            yearly_conversation(key)
                            break
                    else:
                        print('Conversation not found.')
                except FileNotFoundError:
                    if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
                        count_messages()
            else:
                yearly_chats()
        if user_input[0] == 'hours':
            if len(user_input) > 1 and not user_input[1] == '-h':
                try:
                    data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
                    for key in data.keys():
                        if key.startswith(user_input[1]):
                            if len(user_input) < 3:
                                hours_conversation(key)
                            else:
                                hours_conversation(key, float(user_input[2]))
                            break
                    else:
                        print('Conversation not found.')
                except FileNotFoundError:
                    if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
                        count_messages()
            elif len(user_input) > 1 and user_input[1] == '-h':
                hours_chats(float(user_input[2]))
            else:
                hours_chats()
:param data_source: dictionary containing prepared data generated", "id, or key from get_data() function :return: None \"\"\" data_source = pd.DataFrame(data_source) data_source", "messages') print(' [specific user, hours difference]') print(' hours [name, -h] - hour distribution", "None for overall statistics (default None) :param chars: True for counting chars in", "else (f'file:./{filename}' if filename.endswith('.zip') else f'file:./{filename}.zip') try: source = zipfile.ZipFile(io.BytesIO(urlopen(filename).read())) break except URLError:", "messages?[y/n] ').lower() == 'y': count_messages() if user_input[0] == 'user': if len(user_input) > 1:", "Count characters?[y/n] ').lower() == 'y': count_characters() else: try: data = json.loads(open('messages.json', 'r', encoding='utf-8').read())", "messages differently (default 0.0) :return: dictionary of number of messages grouped by timeframe", "key pointing to a specific conversation, otherwise None \"\"\" try: data = json.loads(open('messages_chars.json'", "x in namelist if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')} for sender", "'chars': count_characters() if user_input[0] == 'help' or user_input[0] == '?': print('Messenger Counter available", "collections.Counter()) except KeyError: break total[sender] = dict(counted_all) with open('messages_chars.json', 'w', encoding='utf-8') as output:", "dictionary containing prepared data generated by the get_data() function :param user_name: person name,", "not counted. Count messages?[y/n] ').lower() == 'y': count_messages() else: yearly_chats() if user_input[0] ==", "= sum(messages.values()) with open('messages.json', 'w', encoding='utf-8') as output: json.dump(total, output, ensure_ascii=False) def count_characters():", "messages?[y/n] ').lower() == 'y': count_messages() elif len(user_input) > 1 and user_input[1] == '-c':", "def conversation_statistics(data_source, conversation): \"\"\" Prints messages statistics for specific conversation of given data", "key in data.keys(): if key.startswith(user_input[1]): monthly_conversation(key) else: print('Conversation not found.') else: print('Please specify", "show statistics differently (default 0.0) :return: None \"\"\" interval_plot(interval_count(conversation, lambda x: x.dt.date, delta))", "filename: ') filename = f'file:///{filename}' if filename[1] == ':'\\ else (f'file:./{filename}' if filename.endswith('.zip')", "else f'file:./{filename}.zip') try: source = zipfile.ZipFile(io.BytesIO(urlopen(filename).read())) break except URLError: print('File not found, try", "data_source.loc[:, conversation] data_source = data_source[data_source > 0].sort_values(ascending=False).astype('int') pd.set_option('display.max_rows', None) print(data_source) def characters_statistics(data_source): \"\"\"", "statistics instead of messages, False otherwise (default False) :return: None \"\"\" if conversation", "in requested directory messages += collections.Counter(function(pd.to_datetime(pd.DataFrame(json.loads( source.open('messages/inbox/' + inbox_name + '/message_' + str(i)", "exit - exits the program') if user_input[0] == 'stats': if len(user_input) > 2", "and x != 'messages/inbox/')} for sender in senders: messages, i = collections.Counter(), 0", "data.keys(): if key.startswith(user_input[1]): if len(user_input) < 3: daily_conversation(key) else: daily_conversation(key, float(user_input[2])) break else:", "conversation) def messages_statistics(data_source): \"\"\" Prints messages overall statistics of given data source. 
:param", "and show statistics differently (default 0.0) :return: None \"\"\" messages = collections.Counter() for", "of the conversation id or None for overall statistics (default None) :param chars:", "= pd.Series(messages).sort_index() print(messages.describe()) plt.bar(messages.index, messages) plt.savefig('messages.pdf') plt.show() # Hours def hours(difference, conversation=None): \"\"\"", "hours difference]') print(' hours [name, -h] - hour distribution of messages') print(' [specific", "user: True for user name instead of conversation id, False otherwise (default False)", ":param data_source: dictionary containing prepared data generated by the get_data() function :return: None", "- counts all characters and saves to messages_chars.json') print(' stats [conversation, -c] -", "# Intervals def interval_count(inbox_name, function, delta=0.0): \"\"\" Counts number of messages based on", "Prints characters statistics of given data source. :param data_source: dictionary containing prepared data", "output: json.dump(total, output, ensure_ascii=False) def count_characters(): \"\"\" Counts characters from messages and saves", "None \"\"\" interval_plot(interval_count(conversation, lambda x: x.dt.date, delta)) def daily_chats(delta=0.0): \"\"\" Shows chart of", "json.loads(open('messages.json', 'r', encoding='utf-8').read()) data = pd.DataFrame(data).fillna(0).astype('int') for key in data.index: if key.startswith(' '.join(user_input[1:])):", "if chars: characters_statistics(data_source) else: messages_statistics(data_source) else: if chars: raise NotImplementedError() else: print(conversation) conversation_statistics(data_source,", "differently (default 0.0) :return: dictionary of number of messages grouped by timeframe \"\"\"", "messages (available soon)') # print(' [specific user, day difference]') print(' daily [name, -h]", "user_input[0] == 'monthly': if len(user_input) > 1: try: data = json.loads(open('messages.json', 'r', encoding='utf-8').read())", "encoding='utf-8').read()) characters_statistics(data) except FileNotFoundError: if input('Characters not counted. Count characters?[y/n] ').lower() == 'y':", "x: x.dt.year) messages = pd.DataFrame(messages, index=[0]) print(messages.iloc[0].describe()) plt.bar(messages.columns, messages.iloc[0]) plt.savefig('messages.pdf') plt.show() if __name__=='__main__':", "the program') if user_input[0] == 'stats': if len(user_input) > 2 and user_input[2] ==", "from messages and saves output to the file. :param chars: True for counting", "for key in data.keys(): if key.lower().startswith(conversation.lower()): daily_conversation(key, difference) break else: print('Conversation not found.')", "\"\"\" Reads data from messages.json or messages_chars.json and finds key based on the", "yearly_chats() if user_input[0] == 'hours': if len(user_input) > 1 and not user_input[1] ==", "messages and saves output to messages_chars.json. :return: None \"\"\" namelist = source.namelist() total,", "\"\"\" if conversation is None: hours_chats(difference) else: data = json.loads(open('messages.json', 'r', encoding='utf-8').read()) for", "'/message_' + str(i) + '.json').read())[ 'messages']).iloc[:, 0]) except KeyError: break total[sender] = {k.encode('iso-8859-1').decode('utf-8'):", "hours_conversation(key) else: hours_conversation(key, float(user_input[2])) break else: print('Conversation not found.') else: print('Please specify conversation.')", "'r', encoding='utf-8').read()) messages_statistics(data) except FileNotFoundError: if input('Messages not counted. 
Count messages?[y/n] ').lower() ==", "senders: counted_all, i = collections.Counter(), 0 while True: try: i += 1 frame", "namelist = source.namelist() total, senders = {}, {x.split('/')[2] for x in namelist if", "\"\"\" Prints messages statistics for specific conversation of given data source. :param data_source:", ":return: None \"\"\" interval_plot(interval_count(conversation, lambda x: x.dt.date, delta)) def daily_chats(delta=0.0): \"\"\" Shows chart", "count_characters() elif len(user_input) > 1 and not user_input[1] == '-c': try: data =", "timeframe \"\"\" messages, i = collections.Counter(), 0 while True: try: i += 1", "time shift by and show statistics differently (default 0.0) :return: None \"\"\" messages", ":return: None \"\"\" messages = pd.DataFrame(messages, index=[0]) print(messages.iloc[0].describe()) plt.bar(messages.columns, messages.iloc[0]) plt.xticks(list(range(24)), [f'{x %", "is None: hours_chats(difference) else: data = json.loads(open('messages.json', 'r', encoding='utf-8').read()) for key in data.keys():", "data = pd.DataFrame(data).fillna(0).astype('int') for key in data.index: if key.startswith(' '.join(user_input[1:])): user_statistics(data, key) break", "of messages') print(' [specific user, hours difference]') print(' help - displays this help", "1], unit='ms').dt.tz_localize('UTC').dt.tz_convert( 'Europe/Warsaw').add(pd.Timedelta(hours=-delta)))) except KeyError: break return messages def interval_plot(messages): \"\"\" Shows chart", "function :return: None \"\"\" data_source = pd.DataFrame(data_source) data_source = data_source.loc[:, conversation] data_source =", "if user_input[0] == 'yearly': if len(user_input) > 1: try: data = json.loads(open('messages.json', 'r',", "(x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}: messages += interval_count(sender, lambda x: x.dt.date,", "of number of messages per day across all conversation. :param delta: number of", "messages.iloc[0]) plt.xticks(list(range(24)), [f'{x % 24}:{int(abs((delta - int(delta)) * 60)):02}' for x in range(-(-math.floor(delta)", "'r', encoding='utf-8').read()) if len(user_input) > 1: for key in data.keys(): if key.startswith(user_input[1]): if", "x: x.dt.date, delta)) def daily_chats(delta=0.0): \"\"\" Shows chart of number of messages per", "chars=False): \"\"\" Prints statistics of given data source. :param data_source: dictionary containing prepared", "if user_input[0] == '' or user_input[0] == 'count': count_messages() if user_input[0] == 'chars':" ]
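The script above is driven either interactively (the command loop under __main__) or programmatically. What follows is a minimal usage sketch, not part of the original project: it assumes the script is saved and importable as messenger_counter (a hypothetical module name) and that a Facebook data export archive, here the placeholder name facebook-YourName.zip, sits in the working directory.

import messenger_counter as mc  # hypothetical module name for the script above

mc.set_source('facebook-YourName.zip')     # placeholder archive name
mc.count_messages()                        # writes messages.json
data, key = mc.get_data('John')            # prefix search; 'John' is illustrative
if key is not None:
    mc.conversation_statistics(data, key)  # per-sender counts for that conversation
    mc.daily_conversation(key)             # chart of messages per day since the chat began

This is the same flow the interactive stats and daily commands perform, minus the count-first y/n prompts.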
[ "ev.z[0] == 0' ), LambdaStr( name='ev : ev.w[0] >= 300', lambda_str='ev : ev.w[0]", ") ), dict( factory='NotFactory', components=( dict( factory='AnyFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev :", "1', 'ev : ev.y[0] >= 100', )), dict(Not=dict( Any=( 'ev : ev.z[0] ==", "name='ev : ev.nJets[0] >= 2', lambda_str='ev : ev.nJets[0] >= 2', ), id='string:lambda_str' ),", "marks=pytest.mark.skip(reason='not fully expanded') ), ] @pytest.mark.parametrize('path_cfg, expected, _', params) def test_expand_path_cfg(path_cfg, expected, _):", "obj = FactoryDispatcher(path_cfg=path_cfg, **kargs) assert repr(expected) == repr(obj) assert str(expected) == str(obj) ##__________________________________________________________________||", "dict(All=()), {'factory': 'AllFactory', 'components': ()}, All(name='All', selections=[]), id='dict-all-empty' ), pytest.param( dict(Any=()), {'factory': 'AnyFactory',", "from alphatwirl.selection.factories.factory import FactoryDispatcher from alphatwirl.selection.modules.LambdaStr import LambdaStr from alphatwirl.selection.modules import All, Any,", "import All, Any, Not ##__________________________________________________________________|| # path_cfg, expanded, obj params = [ pytest.param(", "ValueError, id='multiple-vertices-Any-Not' ), pytest.param( dict(), ValueError, id='empty-dict' ), ]) def test_expand_path_cfg_raise(path_cfg, error): with", ": ev.x[0] >= 1'), LambdaStr( name='ev : ev.y[0] >= 100', lambda_str='ev : ev.y[0]", "expected): kargs = dict( AllClass=All, AnyClass=Any, NotClass=Not, LambdaStrClass=LambdaStr, n=5242, ) obj = FactoryDispatcher(path_cfg=path_cfg,", "Any=()), ValueError, id='multiple-vertices-All-Any' ), pytest.param( dict(All=(), Not=()), ValueError, id='multiple-vertices-All-Not' ), pytest.param( dict(Any=(), Not=()),", "components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.x[0] >= 1', ), dict( factory='LambdaStrFactory', components=(),", "import os import sys import pytest from alphatwirl.selection.factories.expand import expand_path_cfg from alphatwirl.selection.factories.factory import", "ev.y[0] >= 100', )), dict(Not=dict( Any=( 'ev : ev.z[0] == 0', 'ev :", ": ev.x[0] == 0' ), All( name='All', selections=[ LambdaStr( name='ev : ev.x[0] >=", "n=5242, ) obj = FactoryDispatcher(path_cfg=path_cfg, **kargs) assert repr(expected) == repr(obj) assert str(expected) ==", "{n}', ), LambdaStr( name='ev : ev.nJets[0] >= 5242', lambda_str='ev : ev.nJets[0] >= 5242',", "@pytest.mark.parametrize('path_cfg, error', [ pytest.param( dict(All=(), Any=()), ValueError, id='multiple-vertices-All-Any' ), pytest.param( dict(All=(), Not=()), ValueError,", "pytest from alphatwirl.selection.factories.expand import expand_path_cfg from alphatwirl.selection.factories.factory import FactoryDispatcher from alphatwirl.selection.modules.LambdaStr import LambdaStr", "pytest.param( dict(Any=(), Not=()), ValueError, id='multiple-vertices-Any-Not' ), pytest.param( dict(), ValueError, id='empty-dict' ), ]) def", "assert str(expected) == str(obj) ##__________________________________________________________________|| @pytest.mark.parametrize('path_cfg, error', [ pytest.param( dict(All=(), Any=()), ValueError, id='multiple-vertices-All-Any'", ": ev.z[0] == 0', lambda_str='ev : ev.z[0] == 0' ), LambdaStr( name='ev :", "()}, Any(name='Any', selections=[]), id='dict-any-empty' ), pytest.param( dict(Any=( 'ev : ev.x[0] == 0', dict(All=(", "300', ), ), ), ), ) ) ), Any( name='Any', selections=[ LambdaStr( 
name='ev", "actual # give expanded one actual = expand_path_cfg(path_cfg=actual) assert expected == actual @pytest.mark.parametrize('path_cfg,", "), pytest.param( dict(All=()), {'factory': 'AllFactory', 'components': ()}, All(name='All', selections=[]), id='dict-all-empty' ), pytest.param( dict(Any=()),", ": ev.nJets[0] >= 2', dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >= 2', ),", "pytest.param( dict(Any=()), {'factory': 'AnyFactory', 'components': ()}, Any(name='Any', selections=[]), id='dict-any-empty' ), pytest.param( dict(Any=( 'ev", "ev.x[0] == 0', dict(All=( 'ev : ev.x[0] >= 1', 'ev : ev.y[0] >=", ": ev.z[0] == 0' ), dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.w[0] >= 300',", "components=(), lambda_str='ev : ev.z[0] == 0' ), dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.w[0]", "actual = expand_path_cfg(path_cfg=path_cfg) assert expected == actual # give expanded one actual =", "actual @pytest.mark.parametrize('path_cfg, _, expected', params) def test_factory(path_cfg, _, expected): kargs = dict( AllClass=All,", "dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >= 2', ), LambdaStr( name='ev : ev.nJets[0]", "= [ pytest.param( 'ev : ev.nJets[0] >= 2', dict( components=(), factory='LambdaStrFactory', lambda_str='ev :", "factory='LambdaStrFactory', components=(), lambda_str='ev : ev.z[0] == 0' ), dict( factory='LambdaStrFactory', components=(), lambda_str='ev :", "lambda_str='ev : ev.nJets[0] >= {n}', ), LambdaStr( name='ev : ev.nJets[0] >= 5242', lambda_str='ev", ": ev.z[0] == 0', 'ev : ev.w[0] >= 300', ), )), )), dict(", "0', lambda_str='ev : ev.z[0] == 0' ), LambdaStr( name='ev : ev.w[0] >= 300',", ">= 2', dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >= 2', ), LambdaStr( name='ev", "selections=[]), id='dict-all-empty' ), pytest.param( dict(Any=()), {'factory': 'AnyFactory', 'components': ()}, Any(name='Any', selections=[]), id='dict-any-empty' ),", "{n}', dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >= {n}', ), LambdaStr( name='ev :", ": ev.x[0] >= 1', ), dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.y[0] >= 100',", "dict( AllClass=All, AnyClass=Any, NotClass=Not, LambdaStrClass=LambdaStr, n=5242, ) obj = FactoryDispatcher(path_cfg=path_cfg, **kargs) assert repr(expected)", "components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.z[0] == 0' ), dict( factory='LambdaStrFactory', components=(),", "params) def test_expand_path_cfg(path_cfg, expected, _): actual = expand_path_cfg(path_cfg=path_cfg) assert expected == actual #", "dict(Not=dict( Any=( 'ev : ev.z[0] == 0', 'ev : ev.w[0] >= 300', ),", "LambdaStr( name='ev : ev.y[0] >= 100', lambda_str='ev : ev.y[0] >= 100') ] ),", "lambda_str='ev : ev.x[0] == 0' ), All( name='All', selections=[ LambdaStr( name='ev : ev.x[0]", "== actual # give expanded one actual = expand_path_cfg(path_cfg=actual) assert expected == actual", ">= {n}', ), LambdaStr( name='ev : ev.nJets[0] >= 5242', lambda_str='ev : ev.nJets[0] >=", "factory='NotFactory', components=( dict( factory='AnyFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.z[0] == 0'", ">= 5242', ), id='string:lambda_str-not-formatted' ), pytest.param( dict(All=()), {'factory': 'AllFactory', 'components': ()}, All(name='All', selections=[]),", "= expand_path_cfg(path_cfg=actual) assert expected == actual 
@pytest.mark.parametrize('path_cfg, _, expected', params) def test_factory(path_cfg, _,", "ev.nJets[0] >= {n}', dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >= {n}', ), LambdaStr(", "pytest.param( 'ev : ev.nJets[0] >= 2', dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >=", "dict(Any=()), {'factory': 'AnyFactory', 'components': ()}, Any(name='Any', selections=[]), id='dict-any-empty' ), pytest.param( dict(Any=( 'ev :", "_, expected): kargs = dict( AllClass=All, AnyClass=Any, NotClass=Not, LambdaStrClass=LambdaStr, n=5242, ) obj =", "] ), id='example', ## marks=pytest.mark.skip(reason='not fully expanded') ), ] @pytest.mark.parametrize('path_cfg, expected, _', params)", ": ev.y[0] >= 100', )), dict(Not=dict( Any=( 'ev : ev.z[0] == 0', 'ev", "lambda_str='ev : ev.x[0] >= 1'), LambdaStr( name='ev : ev.y[0] >= 100', lambda_str='ev :", "factory='AnyFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.z[0] == 0' ), dict( factory='LambdaStrFactory',", "= dict( AllClass=All, AnyClass=Any, NotClass=Not, LambdaStrClass=LambdaStr, n=5242, ) obj = FactoryDispatcher(path_cfg=path_cfg, **kargs) assert", "100', ) ) ), dict( factory='NotFactory', components=( dict( factory='AnyFactory', components=( dict( factory='LambdaStrFactory', components=(),", "factory='LambdaStrFactory', components=(), lambda_str='ev : ev.w[0] >= 300', ), ), ), ), ) )", ")), )), dict( factory='AnyFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.x[0] == 0',", "dict( factory='AllFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.x[0] >= 1', ), dict(", "id='string:lambda_str-not-formatted' ), pytest.param( dict(All=()), {'factory': 'AllFactory', 'components': ()}, All(name='All', selections=[]), id='dict-all-empty' ), pytest.param(", ">= 300', lambda_str='ev : ev.w[0] >= 300' ) ] ) ) ] ),", "dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.y[0] >= 100', ) ) ), dict( factory='NotFactory',", "{'factory': 'AnyFactory', 'components': ()}, Any(name='Any', selections=[]), id='dict-any-empty' ), pytest.param( dict(Any=( 'ev : ev.x[0]", "assert expected == actual @pytest.mark.parametrize('path_cfg, _, expected', params) def test_factory(path_cfg, _, expected): kargs", "), id='string:lambda_str' ), pytest.param( 'ev : ev.nJets[0] >= {n}', dict( components=(), factory='LambdaStrFactory', lambda_str='ev", "repr(obj) assert str(expected) == str(obj) ##__________________________________________________________________|| @pytest.mark.parametrize('path_cfg, error', [ pytest.param( dict(All=(), Any=()), ValueError,", "Not=()), ValueError, id='multiple-vertices-Any-Not' ), pytest.param( dict(), ValueError, id='empty-dict' ), ]) def test_expand_path_cfg_raise(path_cfg, error):", "100', lambda_str='ev : ev.y[0] >= 100') ] ), Not( name='Not', selection=Any( name='Any', selections=[", "factory='LambdaStrFactory', components=(), lambda_str='ev : ev.x[0] == 0', ), dict( factory='AllFactory', components=( dict( factory='LambdaStrFactory',", "id='multiple-vertices-All-Any' ), pytest.param( dict(All=(), Not=()), ValueError, id='multiple-vertices-All-Not' ), pytest.param( dict(Any=(), Not=()), ValueError, id='multiple-vertices-Any-Not'", "factory='AnyFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.x[0] == 0', ), dict( factory='AllFactory',", "components=( dict( factory='AnyFactory', components=( dict( 
factory='LambdaStrFactory', components=(), lambda_str='ev : ev.z[0] == 0' ),", "lambda_str='ev : ev.x[0] == 0', ), dict( factory='AllFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev", "'ev : ev.nJets[0] >= {n}', dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >= {n}',", "def test_expand_path_cfg(path_cfg, expected, _): actual = expand_path_cfg(path_cfg=path_cfg) assert expected == actual # give", "ev.z[0] == 0' ), dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.w[0] >= 300', ),", "dict(All=(), Not=()), ValueError, id='multiple-vertices-All-Not' ), pytest.param( dict(Any=(), Not=()), ValueError, id='multiple-vertices-Any-Not' ), pytest.param( dict(),", "), dict( factory='NotFactory', components=( dict( factory='AnyFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.z[0]", "LambdaStr( name='ev : ev.x[0] >= 1', lambda_str='ev : ev.x[0] >= 1'), LambdaStr( name='ev", "'ev : ev.nJets[0] >= 2', dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >= 2',", "alphatwirl.selection.factories.factory import FactoryDispatcher from alphatwirl.selection.modules.LambdaStr import LambdaStr from alphatwirl.selection.modules import All, Any, Not", "_', params) def test_expand_path_cfg(path_cfg, expected, _): actual = expand_path_cfg(path_cfg=path_cfg) assert expected == actual", ": ev.nJets[0] >= 2', ), LambdaStr( name='ev : ev.nJets[0] >= 2', lambda_str='ev :", "2', dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >= 2', ), LambdaStr( name='ev :", "lambda_str='ev : ev.nJets[0] >= 5242', ), id='string:lambda_str-not-formatted' ), pytest.param( dict(All=()), {'factory': 'AllFactory', 'components':", ">= 100') ] ), Not( name='Not', selection=Any( name='Any', selections=[ LambdaStr( name='ev : ev.z[0]", "name='Any', selections=[ LambdaStr( name='ev : ev.z[0] == 0', lambda_str='ev : ev.z[0] == 0'", "pytest.param( dict(All=()), {'factory': 'AllFactory', 'components': ()}, All(name='All', selections=[]), id='dict-all-empty' ), pytest.param( dict(Any=()), {'factory':", "Any( name='Any', selections=[ LambdaStr( name='ev : ev.x[0] == 0', lambda_str='ev : ev.x[0] ==", ">= 300', ), ), ), ), ) ) ), Any( name='Any', selections=[ LambdaStr(", "), pytest.param( dict(), ValueError, id='empty-dict' ), ]) def test_expand_path_cfg_raise(path_cfg, error): with pytest.raises(error): expand_path_cfg(path_cfg=path_cfg)", "lambda_str='ev : ev.x[0] >= 1', ), dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.y[0] >=", "factory='AllFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.x[0] >= 1', ), dict( factory='LambdaStrFactory',", "LambdaStr( name='ev : ev.x[0] == 0', lambda_str='ev : ev.x[0] == 0' ), All(", "100', )), dict(Not=dict( Any=( 'ev : ev.z[0] == 0', 'ev : ev.w[0] >=", "##__________________________________________________________________|| # path_cfg, expanded, obj params = [ pytest.param( 'ev : ev.nJets[0] >=", "str(obj) ##__________________________________________________________________|| @pytest.mark.parametrize('path_cfg, error', [ pytest.param( dict(All=(), Any=()), ValueError, id='multiple-vertices-All-Any' ), pytest.param( dict(All=(),", "lambda_str='ev : ev.nJets[0] >= 2', ), LambdaStr( name='ev : ev.nJets[0] >= 2', lambda_str='ev", "@pytest.mark.parametrize('path_cfg, expected, _', params) def test_expand_path_cfg(path_cfg, expected, _): actual = 
expand_path_cfg(path_cfg=path_cfg) assert expected", "name='ev : ev.z[0] == 0', lambda_str='ev : ev.z[0] == 0' ), LambdaStr( name='ev", "import sys import pytest from alphatwirl.selection.factories.expand import expand_path_cfg from alphatwirl.selection.factories.factory import FactoryDispatcher from", "), All( name='All', selections=[ LambdaStr( name='ev : ev.x[0] >= 1', lambda_str='ev : ev.x[0]", "0', lambda_str='ev : ev.x[0] == 0' ), All( name='All', selections=[ LambdaStr( name='ev :", ") ) ), dict( factory='NotFactory', components=( dict( factory='AnyFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev", "5242', ), id='string:lambda_str-not-formatted' ), pytest.param( dict(All=()), {'factory': 'AllFactory', 'components': ()}, All(name='All', selections=[]), id='dict-all-empty'", "100') ] ), Not( name='Not', selection=Any( name='Any', selections=[ LambdaStr( name='ev : ev.z[0] ==", "), id='example', ## marks=pytest.mark.skip(reason='not fully expanded') ), ] @pytest.mark.parametrize('path_cfg, expected, _', params) def", "lambda_str='ev : ev.z[0] == 0' ), LambdaStr( name='ev : ev.w[0] >= 300', lambda_str='ev", "== str(obj) ##__________________________________________________________________|| @pytest.mark.parametrize('path_cfg, error', [ pytest.param( dict(All=(), Any=()), ValueError, id='multiple-vertices-All-Any' ), pytest.param(", "str(expected) == str(obj) ##__________________________________________________________________|| @pytest.mark.parametrize('path_cfg, error', [ pytest.param( dict(All=(), Any=()), ValueError, id='multiple-vertices-All-Any' ),", "##__________________________________________________________________|| @pytest.mark.parametrize('path_cfg, error', [ pytest.param( dict(All=(), Any=()), ValueError, id='multiple-vertices-All-Any' ), pytest.param( dict(All=(), Not=()),", "ev.x[0] == 0', lambda_str='ev : ev.x[0] == 0' ), All( name='All', selections=[ LambdaStr(", "# path_cfg, expanded, obj params = [ pytest.param( 'ev : ev.nJets[0] >= 2',", "sys import pytest from alphatwirl.selection.factories.expand import expand_path_cfg from alphatwirl.selection.factories.factory import FactoryDispatcher from alphatwirl.selection.modules.LambdaStr", "name='Any', selections=[ LambdaStr( name='ev : ev.x[0] == 0', lambda_str='ev : ev.x[0] == 0'", "params) def test_factory(path_cfg, _, expected): kargs = dict( AllClass=All, AnyClass=Any, NotClass=Not, LambdaStrClass=LambdaStr, n=5242,", "expanded, obj params = [ pytest.param( 'ev : ev.nJets[0] >= 2', dict( components=(),", "== repr(obj) assert str(expected) == str(obj) ##__________________________________________________________________|| @pytest.mark.parametrize('path_cfg, error', [ pytest.param( dict(All=(), Any=()),", "pytest.param( dict(), ValueError, id='empty-dict' ), ]) def test_expand_path_cfg_raise(path_cfg, error): with pytest.raises(error): expand_path_cfg(path_cfg=path_cfg) ##__________________________________________________________________||", "), dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.y[0] >= 100', ) ) ), dict(", "FactoryDispatcher from alphatwirl.selection.modules.LambdaStr import LambdaStr from alphatwirl.selection.modules import All, Any, Not ##__________________________________________________________________|| #", ": ev.x[0] >= 1', lambda_str='ev : ev.x[0] >= 1'), LambdaStr( name='ev : ev.y[0]", "## marks=pytest.mark.skip(reason='not fully expanded') ), ] @pytest.mark.parametrize('path_cfg, expected, _', params) def test_expand_path_cfg(path_cfg, 
expected,", "), pytest.param( 'ev : ev.nJets[0] >= {n}', dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0]", "), pytest.param( dict(Any=()), {'factory': 'AnyFactory', 'components': ()}, Any(name='Any', selections=[]), id='dict-any-empty' ), pytest.param( dict(Any=(", "Any, Not ##__________________________________________________________________|| # path_cfg, expanded, obj params = [ pytest.param( 'ev :", "actual = expand_path_cfg(path_cfg=actual) assert expected == actual @pytest.mark.parametrize('path_cfg, _, expected', params) def test_factory(path_cfg,", "import pytest from alphatwirl.selection.factories.expand import expand_path_cfg from alphatwirl.selection.factories.factory import FactoryDispatcher from alphatwirl.selection.modules.LambdaStr import", ">= {n}', dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >= {n}', ), LambdaStr( name='ev", ": ev.z[0] == 0' ), LambdaStr( name='ev : ev.w[0] >= 300', lambda_str='ev :", "()}, All(name='All', selections=[]), id='dict-all-empty' ), pytest.param( dict(Any=()), {'factory': 'AnyFactory', 'components': ()}, Any(name='Any', selections=[]),", "'ev : ev.x[0] >= 1', 'ev : ev.y[0] >= 100', )), dict(Not=dict( Any=(", "), Any( name='Any', selections=[ LambdaStr( name='ev : ev.x[0] == 0', lambda_str='ev : ev.x[0]", "<filename>tests/unit/selection/factories/test_expand_path_cfg.py<gh_stars>0 # <NAME> <<EMAIL>> import os import sys import pytest from alphatwirl.selection.factories.expand import", "one actual = expand_path_cfg(path_cfg=actual) assert expected == actual @pytest.mark.parametrize('path_cfg, _, expected', params) def", "'AllFactory', 'components': ()}, All(name='All', selections=[]), id='dict-all-empty' ), pytest.param( dict(Any=()), {'factory': 'AnyFactory', 'components': ()},", ": ev.y[0] >= 100', ) ) ), dict( factory='NotFactory', components=( dict( factory='AnyFactory', components=(", "LambdaStr( name='ev : ev.z[0] == 0', lambda_str='ev : ev.z[0] == 0' ), LambdaStr(", "), ), ), ) ) ), Any( name='Any', selections=[ LambdaStr( name='ev : ev.x[0]", "== 0', 'ev : ev.w[0] >= 300', ), )), )), dict( factory='AnyFactory', components=(", "name='ev : ev.nJets[0] >= 5242', lambda_str='ev : ev.nJets[0] >= 5242', ), id='string:lambda_str-not-formatted' ),", "assert expected == actual # give expanded one actual = expand_path_cfg(path_cfg=actual) assert expected", "obj params = [ pytest.param( 'ev : ev.nJets[0] >= 2', dict( components=(), factory='LambdaStrFactory',", "kargs = dict( AllClass=All, AnyClass=Any, NotClass=Not, LambdaStrClass=LambdaStr, n=5242, ) obj = FactoryDispatcher(path_cfg=path_cfg, **kargs)", ")), dict( factory='AnyFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.x[0] == 0', ),", "ev.nJets[0] >= 5242', ), id='string:lambda_str-not-formatted' ), pytest.param( dict(All=()), {'factory': 'AllFactory', 'components': ()}, All(name='All',", "dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.w[0] >= 300', ), ), ), ), )", "selections=[ LambdaStr( name='ev : ev.x[0] == 0', lambda_str='ev : ev.x[0] == 0' ),", "test_factory(path_cfg, _, expected): kargs = dict( AllClass=All, AnyClass=Any, NotClass=Not, LambdaStrClass=LambdaStr, n=5242, ) obj", "300' ) ] ) ) ] ), id='example', ## marks=pytest.mark.skip(reason='not fully expanded') ),", "lambda_str='ev : ev.nJets[0] >= 2', ), id='string:lambda_str' ), pytest.param( 'ev : ev.nJets[0] >=", "from alphatwirl.selection.modules.LambdaStr import LambdaStr from 
alphatwirl.selection.modules import All, Any, Not ##__________________________________________________________________|| # path_cfg,", "), ] @pytest.mark.parametrize('path_cfg, expected, _', params) def test_expand_path_cfg(path_cfg, expected, _): actual = expand_path_cfg(path_cfg=path_cfg)", "lambda_str='ev : ev.w[0] >= 300' ) ] ) ) ] ), id='example', ##", "dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.x[0] >= 1', ), dict( factory='LambdaStrFactory', components=(), lambda_str='ev", ": ev.x[0] == 0', lambda_str='ev : ev.x[0] == 0' ), All( name='All', selections=[", ">= 2', lambda_str='ev : ev.nJets[0] >= 2', ), id='string:lambda_str' ), pytest.param( 'ev :", "== 0', lambda_str='ev : ev.z[0] == 0' ), LambdaStr( name='ev : ev.w[0] >=", "= FactoryDispatcher(path_cfg=path_cfg, **kargs) assert repr(expected) == repr(obj) assert str(expected) == str(obj) ##__________________________________________________________________|| @pytest.mark.parametrize('path_cfg,", "ev.z[0] == 0', lambda_str='ev : ev.z[0] == 0' ), LambdaStr( name='ev : ev.w[0]", "ev.x[0] >= 1', lambda_str='ev : ev.x[0] >= 1'), LambdaStr( name='ev : ev.y[0] >=", "2', ), id='string:lambda_str' ), pytest.param( 'ev : ev.nJets[0] >= {n}', dict( components=(), factory='LambdaStrFactory',", "lambda_str='ev : ev.y[0] >= 100', ) ) ), dict( factory='NotFactory', components=( dict( factory='AnyFactory',", ">= 1', lambda_str='ev : ev.x[0] >= 1'), LambdaStr( name='ev : ev.y[0] >= 100',", "pytest.param( dict(All=(), Not=()), ValueError, id='multiple-vertices-All-Not' ), pytest.param( dict(Any=(), Not=()), ValueError, id='multiple-vertices-Any-Not' ), pytest.param(", "ev.nJets[0] >= 2', ), LambdaStr( name='ev : ev.nJets[0] >= 2', lambda_str='ev : ev.nJets[0]", "== 0' ), dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.w[0] >= 300', ), ),", "), )), )), dict( factory='AnyFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.x[0] ==", "assert repr(expected) == repr(obj) assert str(expected) == str(obj) ##__________________________________________________________________|| @pytest.mark.parametrize('path_cfg, error', [ pytest.param(", ">= 5242', lambda_str='ev : ev.nJets[0] >= 5242', ), id='string:lambda_str-not-formatted' ), pytest.param( dict(All=()), {'factory':", "ev.y[0] >= 100', lambda_str='ev : ev.y[0] >= 100') ] ), Not( name='Not', selection=Any(", "**kargs) assert repr(expected) == repr(obj) assert str(expected) == str(obj) ##__________________________________________________________________|| @pytest.mark.parametrize('path_cfg, error', [", "), pytest.param( dict(Any=(), Not=()), ValueError, id='multiple-vertices-Any-Not' ), pytest.param( dict(), ValueError, id='empty-dict' ), ])", "NotClass=Not, LambdaStrClass=LambdaStr, n=5242, ) obj = FactoryDispatcher(path_cfg=path_cfg, **kargs) assert repr(expected) == repr(obj) assert", "FactoryDispatcher(path_cfg=path_cfg, **kargs) assert repr(expected) == repr(obj) assert str(expected) == str(obj) ##__________________________________________________________________|| @pytest.mark.parametrize('path_cfg, error',", "id='dict-all-empty' ), pytest.param( dict(Any=()), {'factory': 'AnyFactory', 'components': ()}, Any(name='Any', selections=[]), id='dict-any-empty' ), pytest.param(", "Not ##__________________________________________________________________|| # path_cfg, expanded, obj params = [ pytest.param( 'ev : ev.nJets[0]", "selections=[]), id='dict-any-empty' ), pytest.param( dict(Any=( 'ev : ev.x[0] == 0', dict(All=( 'ev 
:", "== 0', dict(All=( 'ev : ev.x[0] >= 1', 'ev : ev.y[0] >= 100',", "lambda_str='ev : ev.z[0] == 0' ), dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.w[0] >=", ">= 2', ), id='string:lambda_str' ), pytest.param( 'ev : ev.nJets[0] >= {n}', dict( components=(),", "Not=()), ValueError, id='multiple-vertices-All-Not' ), pytest.param( dict(Any=(), Not=()), ValueError, id='multiple-vertices-Any-Not' ), pytest.param( dict(), ValueError,", ": ev.w[0] >= 300', ), )), )), dict( factory='AnyFactory', components=( dict( factory='LambdaStrFactory', components=(),", ": ev.nJets[0] >= 2', lambda_str='ev : ev.nJets[0] >= 2', ), id='string:lambda_str' ), pytest.param(", ": ev.y[0] >= 100', lambda_str='ev : ev.y[0] >= 100') ] ), Not( name='Not',", "'ev : ev.w[0] >= 300', ), )), )), dict( factory='AnyFactory', components=( dict( factory='LambdaStrFactory',", "dict( factory='AnyFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.x[0] == 0', ), dict(", "AllClass=All, AnyClass=Any, NotClass=Not, LambdaStrClass=LambdaStr, n=5242, ) obj = FactoryDispatcher(path_cfg=path_cfg, **kargs) assert repr(expected) ==", "ev.x[0] >= 1', 'ev : ev.y[0] >= 100', )), dict(Not=dict( Any=( 'ev :", "_, expected', params) def test_factory(path_cfg, _, expected): kargs = dict( AllClass=All, AnyClass=Any, NotClass=Not,", "ValueError, id='multiple-vertices-All-Not' ), pytest.param( dict(Any=(), Not=()), ValueError, id='multiple-vertices-Any-Not' ), pytest.param( dict(), ValueError, id='empty-dict'", "] ) ) ] ), id='example', ## marks=pytest.mark.skip(reason='not fully expanded') ), ] @pytest.mark.parametrize('path_cfg,", "== actual @pytest.mark.parametrize('path_cfg, _, expected', params) def test_factory(path_cfg, _, expected): kargs = dict(", "== 0', ), dict( factory='AllFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.x[0] >=", "1'), LambdaStr( name='ev : ev.y[0] >= 100', lambda_str='ev : ev.y[0] >= 100') ]", "dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >= {n}', ), LambdaStr( name='ev : ev.nJets[0]", "@pytest.mark.parametrize('path_cfg, _, expected', params) def test_factory(path_cfg, _, expected): kargs = dict( AllClass=All, AnyClass=Any,", "ev.w[0] >= 300', ), ), ), ), ) ) ), Any( name='Any', selections=[", "expected == actual # give expanded one actual = expand_path_cfg(path_cfg=actual) assert expected ==", ">= 2', ), LambdaStr( name='ev : ev.nJets[0] >= 2', lambda_str='ev : ev.nJets[0] >=", "ev.nJets[0] >= 2', dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >= 2', ), LambdaStr(", "components=(), lambda_str='ev : ev.y[0] >= 100', ) ) ), dict( factory='NotFactory', components=( dict(", "), pytest.param( dict(All=(), Not=()), ValueError, id='multiple-vertices-All-Not' ), pytest.param( dict(Any=(), Not=()), ValueError, id='multiple-vertices-Any-Not' ),", "give expanded one actual = expand_path_cfg(path_cfg=actual) assert expected == actual @pytest.mark.parametrize('path_cfg, _, expected',", "All( name='All', selections=[ LambdaStr( name='ev : ev.x[0] >= 1', lambda_str='ev : ev.x[0] >=", "selections=[ LambdaStr( name='ev : ev.x[0] >= 1', lambda_str='ev : ev.x[0] >= 1'), LambdaStr(", "dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.x[0] == 0', ), dict( factory='AllFactory', components=( dict(", "pytest.param( dict(All=(), Any=()), ValueError, id='multiple-vertices-All-Any' ), pytest.param( dict(All=(), Not=()), ValueError, 
id='multiple-vertices-All-Not' ), pytest.param(", "Not( name='Not', selection=Any( name='Any', selections=[ LambdaStr( name='ev : ev.z[0] == 0', lambda_str='ev :", "expected', params) def test_factory(path_cfg, _, expected): kargs = dict( AllClass=All, AnyClass=Any, NotClass=Not, LambdaStrClass=LambdaStr,", "components=(), lambda_str='ev : ev.x[0] >= 1', ), dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.y[0]", "ev.w[0] >= 300' ) ] ) ) ] ), id='example', ## marks=pytest.mark.skip(reason='not fully", "params = [ pytest.param( 'ev : ev.nJets[0] >= 2', dict( components=(), factory='LambdaStrFactory', lambda_str='ev", "0', dict(All=( 'ev : ev.x[0] >= 1', 'ev : ev.y[0] >= 100', )),", "id='multiple-vertices-All-Not' ), pytest.param( dict(Any=(), Not=()), ValueError, id='multiple-vertices-Any-Not' ), pytest.param( dict(), ValueError, id='empty-dict' ),", "factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >= 2', ), LambdaStr( name='ev : ev.nJets[0] >= 2',", "id='example', ## marks=pytest.mark.skip(reason='not fully expanded') ), ] @pytest.mark.parametrize('path_cfg, expected, _', params) def test_expand_path_cfg(path_cfg,", "== 0' ), LambdaStr( name='ev : ev.w[0] >= 300', lambda_str='ev : ev.w[0] >=", "factory='LambdaStrFactory', components=(), lambda_str='ev : ev.y[0] >= 100', ) ) ), dict( factory='NotFactory', components=(", "components=(), lambda_str='ev : ev.x[0] == 0', ), dict( factory='AllFactory', components=( dict( factory='LambdaStrFactory', components=(),", "'components': ()}, All(name='All', selections=[]), id='dict-all-empty' ), pytest.param( dict(Any=()), {'factory': 'AnyFactory', 'components': ()}, Any(name='Any',", ") obj = FactoryDispatcher(path_cfg=path_cfg, **kargs) assert repr(expected) == repr(obj) assert str(expected) == str(obj)", "fully expanded') ), ] @pytest.mark.parametrize('path_cfg, expected, _', params) def test_expand_path_cfg(path_cfg, expected, _): actual", "# <NAME> <<EMAIL>> import os import sys import pytest from alphatwirl.selection.factories.expand import expand_path_cfg", "dict( factory='NotFactory', components=( dict( factory='AnyFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.z[0] ==", "expected, _): actual = expand_path_cfg(path_cfg=path_cfg) assert expected == actual # give expanded one", "), LambdaStr( name='ev : ev.w[0] >= 300', lambda_str='ev : ev.w[0] >= 300' )", "import FactoryDispatcher from alphatwirl.selection.modules.LambdaStr import LambdaStr from alphatwirl.selection.modules import All, Any, Not ##__________________________________________________________________||", "expected, _', params) def test_expand_path_cfg(path_cfg, expected, _): actual = expand_path_cfg(path_cfg=path_cfg) assert expected ==", "ev.x[0] >= 1'), LambdaStr( name='ev : ev.y[0] >= 100', lambda_str='ev : ev.y[0] >=", "), Not( name='Not', selection=Any( name='Any', selections=[ LambdaStr( name='ev : ev.z[0] == 0', lambda_str='ev", "'ev : ev.z[0] == 0', 'ev : ev.w[0] >= 300', ), )), )),", "alphatwirl.selection.modules import All, Any, Not ##__________________________________________________________________|| # path_cfg, expanded, obj params = [", "# give expanded one actual = expand_path_cfg(path_cfg=actual) assert expected == actual @pytest.mark.parametrize('path_cfg, _,", "dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.z[0] == 0' ), dict( factory='LambdaStrFactory', components=(), lambda_str='ev", "300', ), )), )), dict( factory='AnyFactory', components=( dict( 
factory='LambdaStrFactory', components=(), lambda_str='ev : ev.x[0]", "from alphatwirl.selection.modules import All, Any, Not ##__________________________________________________________________|| # path_cfg, expanded, obj params =", ")), dict(Not=dict( Any=( 'ev : ev.z[0] == 0', 'ev : ev.w[0] >= 300',", ">= 100', lambda_str='ev : ev.y[0] >= 100') ] ), Not( name='Not', selection=Any( name='Any',", "repr(expected) == repr(obj) assert str(expected) == str(obj) ##__________________________________________________________________|| @pytest.mark.parametrize('path_cfg, error', [ pytest.param( dict(All=(),", "expanded') ), ] @pytest.mark.parametrize('path_cfg, expected, _', params) def test_expand_path_cfg(path_cfg, expected, _): actual =", ": ev.nJets[0] >= {n}', dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >= {n}', ),", "name='Not', selection=Any( name='Any', selections=[ LambdaStr( name='ev : ev.z[0] == 0', lambda_str='ev : ev.z[0]", "[ pytest.param( 'ev : ev.nJets[0] >= 2', dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0]", "2', ), LambdaStr( name='ev : ev.nJets[0] >= 2', lambda_str='ev : ev.nJets[0] >= 2',", "] @pytest.mark.parametrize('path_cfg, expected, _', params) def test_expand_path_cfg(path_cfg, expected, _): actual = expand_path_cfg(path_cfg=path_cfg) assert", "ev.x[0] == 0', ), dict( factory='AllFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev : ev.x[0]", "ValueError, id='multiple-vertices-All-Any' ), pytest.param( dict(All=(), Not=()), ValueError, id='multiple-vertices-All-Not' ), pytest.param( dict(Any=(), Not=()), ValueError,", "name='ev : ev.x[0] == 0', lambda_str='ev : ev.x[0] == 0' ), All( name='All',", "os import sys import pytest from alphatwirl.selection.factories.expand import expand_path_cfg from alphatwirl.selection.factories.factory import FactoryDispatcher", "LambdaStr( name='ev : ev.nJets[0] >= 2', lambda_str='ev : ev.nJets[0] >= 2', ), id='string:lambda_str'", "<NAME> <<EMAIL>> import os import sys import pytest from alphatwirl.selection.factories.expand import expand_path_cfg from", "expand_path_cfg(path_cfg=actual) assert expected == actual @pytest.mark.parametrize('path_cfg, _, expected', params) def test_factory(path_cfg, _, expected):", ") ) ), Any( name='Any', selections=[ LambdaStr( name='ev : ev.x[0] == 0', lambda_str='ev", "dict(Any=( 'ev : ev.x[0] == 0', dict(All=( 'ev : ev.x[0] >= 1', 'ev", "pytest.param( 'ev : ev.nJets[0] >= {n}', dict( components=(), factory='LambdaStrFactory', lambda_str='ev : ev.nJets[0] >=", "'ev : ev.y[0] >= 100', )), dict(Not=dict( Any=( 'ev : ev.z[0] == 0',", "LambdaStr( name='ev : ev.w[0] >= 300', lambda_str='ev : ev.w[0] >= 300' ) ]", "import expand_path_cfg from alphatwirl.selection.factories.factory import FactoryDispatcher from alphatwirl.selection.modules.LambdaStr import LambdaStr from alphatwirl.selection.modules import", ") ] ), id='example', ## marks=pytest.mark.skip(reason='not fully expanded') ), ] @pytest.mark.parametrize('path_cfg, expected, _',", "Any(name='Any', selections=[]), id='dict-any-empty' ), pytest.param( dict(Any=( 'ev : ev.x[0] == 0', dict(All=( 'ev", ">= 1'), LambdaStr( name='ev : ev.y[0] >= 100', lambda_str='ev : ev.y[0] >= 100')", "expected == actual @pytest.mark.parametrize('path_cfg, _, expected', params) def test_factory(path_cfg, _, expected): kargs =", "ev.w[0] >= 300', ), )), )), dict( factory='AnyFactory', components=( dict( factory='LambdaStrFactory', components=(), lambda_str='ev", "), dict( 
# <<EMAIL>>
import os
import sys

import pytest

from alphatwirl.selection.factories.expand import expand_path_cfg
from alphatwirl.selection.factories.factory import FactoryDispatcher
from alphatwirl.selection.modules.LambdaStr import LambdaStr
from alphatwirl.selection.modules import All, Any, Not

##__________________________________________________________________||
# path_cfg, expanded, obj
params = [
    pytest.param(
        'ev : ev.nJets[0] >= 2',
        dict(
            components=(),
            factory='LambdaStrFactory',
            lambda_str='ev : ev.nJets[0] >= 2',
        ),
        LambdaStr(
            name='ev : ev.nJets[0] >= 2',
            lambda_str='ev : ev.nJets[0] >= 2',
        ),
        id='string:lambda_str'
    ),
    pytest.param(
        'ev : ev.nJets[0] >= {n}',
        dict(
            components=(),
            factory='LambdaStrFactory',
            lambda_str='ev : ev.nJets[0] >= {n}',
        ),
        LambdaStr(
            name='ev : ev.nJets[0] >= 5242',
            lambda_str='ev : ev.nJets[0] >= 5242',
        ),
        id='string:lambda_str-not-formatted'
    ),
    pytest.param(
        dict(All=()),
        {'factory': 'AllFactory', 'components': ()},
        All(name='All', selections=[]),
        id='dict-all-empty'
    ),
    pytest.param(
        dict(Any=()),
        {'factory': 'AnyFactory', 'components': ()},
        Any(name='Any', selections=[]),
        id='dict-any-empty'
    ),
    pytest.param(
        dict(Any=(
            'ev : ev.x[0] == 0',
            dict(All=(
                'ev : ev.x[0] >= 1',
                'ev : ev.y[0] >= 100',
            )),
            dict(Not=dict(
                Any=(
                    'ev : ev.z[0] == 0',
                    'ev : ev.w[0] >= 300',
                ),
            )),
        )),
        dict(
            factory='AnyFactory',
            components=(
                dict(
                    factory='LambdaStrFactory',
                    components=(),
                    lambda_str='ev : ev.x[0] == 0',
                ),
                dict(
                    factory='AllFactory',
                    components=(
                        dict(
                            factory='LambdaStrFactory',
                            components=(),
                            lambda_str='ev : ev.x[0] >= 1',
                        ),
                        dict(
                            factory='LambdaStrFactory',
                            components=(),
                            lambda_str='ev : ev.y[0] >= 100',
                        )
                    )
                ),
                dict(
                    factory='NotFactory',
                    components=(
                        dict(
                            factory='AnyFactory',
                            components=(
                                dict(
                                    factory='LambdaStrFactory',
                                    components=(),
                                    lambda_str='ev : ev.z[0] == 0'
                                ),
                                dict(
                                    factory='LambdaStrFactory',
                                    components=(),
                                    lambda_str='ev : ev.w[0] >= 300',
                                ),
                            ),
                        ),
                    ),
                )
            )
        ),
        Any(
            name='Any',
            selections=[
                LambdaStr(
                    name='ev : ev.x[0] == 0',
                    lambda_str='ev : ev.x[0] == 0'
                ),
                All(
                    name='All',
                    selections=[
                        LambdaStr(
                            name='ev : ev.x[0] >= 1',
                            lambda_str='ev : ev.x[0] >= 1'),
                        LambdaStr(
                            name='ev : ev.y[0] >= 100',
                            lambda_str='ev : ev.y[0] >= 100')
                    ]
                ),
                Not(
                    name='Not',
                    selection=Any(
                        name='Any',
                        selections=[
                            LambdaStr(
                                name='ev : ev.z[0] == 0',
                                lambda_str='ev : ev.z[0] == 0'
                            ),
                            LambdaStr(
                                name='ev : ev.w[0] >= 300',
                                lambda_str='ev : ev.w[0] >= 300'
                            )
                        ]
                    )
                )
            ]
        ),
        id='example',
        ## marks=pytest.mark.skip(reason='not fully expanded')
    ),
]

@pytest.mark.parametrize('path_cfg, expected, _', params)
def test_expand_path_cfg(path_cfg, expected, _):
    actual = expand_path_cfg(path_cfg=path_cfg)
    assert expected == actual

    # give expanded one
    actual = expand_path_cfg(path_cfg=actual)
    assert expected == actual

@pytest.mark.parametrize('path_cfg, _, expected', params)
def test_factory(path_cfg, _, expected):
    kargs = dict(
        AllClass=All, AnyClass=Any, NotClass=Not,
        LambdaStrClass=LambdaStr,
        n=5242,
    )
    obj = FactoryDispatcher(path_cfg=path_cfg, **kargs)
    assert repr(expected) == repr(obj)
    assert str(expected) == str(obj)

@pytest.mark.parametrize('path_cfg, error', [
    pytest.param(dict(All=(), Any=()), ValueError, id='multiple-vertices-All-Any'),
    pytest.param(dict(All=(), Not=()), ValueError, id='multiple-vertices-All-Not'),
    pytest.param(dict(Any=(), Not=()), ValueError, id='multiple-vertices-Any-Not'),
    pytest.param(dict(), ValueError, id='empty-dict'),
])
def test_expand_path_cfg_raise(path_cfg, error):
    with pytest.raises(error):
        expand_path_cfg(path_cfg=path_cfg)

##__________________________________________________________________||
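# The expected dicts above follow a simple recursive scheme: a plain string
# becomes a LambdaStrFactory leaf, and a single-key All/Any/Not dict becomes
# the matching factory with its children expanded in turn.  A minimal sketch
# of that recursion, for illustration only -- this is not alphatwirl's actual
# implementation, and the name expand_sketch is made up here:
def expand_sketch(path_cfg):
    if isinstance(path_cfg, str):
        return dict(factory='LambdaStrFactory', components=(), lambda_str=path_cfg)
    # exactly one of All/Any/Not is allowed; zero or several keys make the
    # unpacking raise ValueError, mirroring the raise tests above
    key, = path_cfg.keys()
    if key == 'Not':
        return dict(factory='NotFactory',
                    components=(expand_sketch(path_cfg[key]),))
    return dict(factory=key + 'Factory',
                components=tuple(expand_sketch(p) for p in path_cfg[key]))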
[ "== 23 : return 'twenty three' if n == 24 : return 'twenty", "try : n = int(input('Number ? ')) print (f'{n:,} = {terbilang(n)}') except :", "10) +'ty'+ (terbilang(n % 10) if n % 10 !=0 else '') else", "return 'thirty' if n == 50 : return 'fifty' return terbilang (n //", "else '') elif n >= 20 : if n == 21 : return", "million ' + (terbilang(n % 1_000_000) if n % 1_000_000 != 0 else", "+'ty'+ (terbilang(n % 10) if n % 10 !=0 else '') else :", "22 : return 'twenty two' if n == 23 : return 'twenty three'", "1_000_000_000 : return terbilang (n // 1_000_000_000) + ' billion ' + (terbilang(n", "1_000_000 : return terbilang (n // 1_000_000) + ' million ' + (terbilang(n", "12 : return 'twelve' elif n == 13 : return 'thirteen' elif n", "'fifteen' else : return terbilang (n % 10) + 'teen' while True :", "% 1_000_000) if n % 1_000_000 != 0 else '') elif n >=", "+ ' thouosand ' + (terbilang(n % 1_000) if n % 1_000 !=0", "+ ' billion ' + (terbilang(n % 1_000_000_000) if n % 1_000_000_000 !=", "terbilang (n // 1_000) + ' thouosand ' + (terbilang(n % 1_000) if", "!= 0 else '') elif n >= 1_000_000 : return terbilang (n //", "os.system('cls') try : n = int(input('Number ? ')) print (f'{n:,} = {terbilang(n)}') except", "100 : if n // 100 == 1 : return 'one hundred' +", "+ ' million ' + (terbilang(n % 1_000_000) if n % 1_000_000 !=", "' billion ' + (terbilang(n % 1_000_000_000) if n % 1_000_000_000 != 0", "< 10 : return kata [n] elif n >= 1_000_000_000 : return terbilang", ": return 'fourteen' elif n == 15 : return 'fifteen' else : return", "['', 'one', 'two', 'tree', 'four', 'five', 'six', 'seven', 'eight', 'nine'] def terbilang (n)", "+ 'hundred' + (terbilang(n % 100) if n % 100 !=0 else '')", "elif n >= 20 : if n == 21 : return 'twenty one'", "(n // 10) +'ty'+ (terbilang(n % 10) if n % 10 !=0 else", "'twenty seven' if n == 28 : return 'twenty eight' if n ==", "0 else '') else : return terbilang (n // 100) + 'hundred' +", "return 'fifty' return terbilang (n // 10) +'ty'+ (terbilang(n % 10) if n", "% 1_000_000_000 != 0 else '') elif n >= 1_000_000 : return terbilang", "!=0 else '') else : if n == 10 : return 'ten' elif", "24 : return 'twenty four' if n == 25 : return 'twenty five'", "n == 15 : return 'fifteen' else : return terbilang (n % 10)", "+ (terbilang(n % 100) if n % 100 != 0 else '') else", "one' if n == 22 : return 'twenty two' if n == 23", "10 !=0 else '') else : if n == 10 : return 'ten'", "return 'twenty five' if n == 26 : return 'twenty six' if n", "else '') else : return terbilang (n // 1_000) + ' thouosand '", ">= 20 : if n == 21 : return 'twenty one' if n", "1_000_000) + ' million ' + (terbilang(n % 1_000_000) if n % 1_000_000", "% 10) if n % 10 !=0 else '') else : if n", "return 'fourteen' elif n == 15 : return 'fifteen' else : return terbilang", ": return 'twelve' elif n == 13 : return 'thirteen' elif n ==", "'twenty five' if n == 26 : return 'twenty six' if n ==", "if n == 24 : return 'twenty four' if n == 25 :", "'teen' while True : os.system('cls') try : n = int(input('Number ? 
')) print", "'') elif n >= 100 : if n // 100 == 1 :", ">= 1_000 : if n // 1_000 == 1 : return 'one thousand'", "elif n == 14 : return 'fourteen' elif n == 15 : return", "(terbilang(n % 1_000) if n % 1_000 != 0 else '') else :", "21 : return 'twenty one' if n == 22 : return 'twenty two'", "terbilang (n) : if n < 10 : return kata [n] elif n", "n == 25 : return 'twenty five' if n == 26 : return", "(n // 1_000_000_000) + ' billion ' + (terbilang(n % 1_000_000_000) if n", "'four', 'five', 'six', 'seven', 'eight', 'nine'] def terbilang (n) : if n <", "if n == 25 : return 'twenty five' if n == 26 :", "'two', 'tree', 'four', 'five', 'six', 'seven', 'eight', 'nine'] def terbilang (n) : if", "n == 14 : return 'fourteen' elif n == 15 : return 'fifteen'", "thouosand ' + (terbilang(n % 1_000) if n % 1_000 !=0 else '')", "if n < 10 : return kata [n] elif n >= 1_000_000_000 :", "five' if n == 26 : return 'twenty six' if n == 27", "'') else : if n == 10 : return 'ten' elif n ==", "n = int(input('Number ? ')) print (f'{n:,} = {terbilang(n)}') except : print('ty again", "1_000 !=0 else '') elif n >= 100 : if n // 100", "return 'twenty two' if n == 23 : return 'twenty three' if n", "== 14 : return 'fourteen' elif n == 15 : return 'fifteen' else", "if n % 1_000 !=0 else '') elif n >= 100 : if", "elif n >= 1_000 : if n // 1_000 == 1 : return", ": if n < 10 : return kata [n] elif n >= 1_000_000_000", "!= 0 else '') else : return terbilang (n // 100) + 'hundred'", "if n // 100 == 1 : return 'one hundred' + (terbilang(n %", "'') elif n >= 1_000 : if n // 1_000 == 1 :", "elif n == 15 : return 'fifteen' else : return terbilang (n %", "11 : return 'eleven' elif n == 12 : return 'twelve' elif n", "'') elif n >= 1_000_000 : return terbilang (n // 1_000_000) + '", "29 : return 'twenty nine' if n == 30 : return 'thirty' if", "+ (terbilang(n % 1_000) if n % 1_000 != 0 else '') else", "n == 28 : return 'twenty eight' if n == 29 : return", "10 : return 'ten' elif n == 11 : return 'eleven' elif n", ": if n // 1_000 == 1 : return 'one thousand' + (terbilang(n", "25 : return 'twenty five' if n == 26 : return 'twenty six'", "else '') else : if n == 10 : return 'ten' elif n", "% 1_000_000_000) if n % 1_000_000_000 != 0 else '') elif n >=", "'twenty two' if n == 23 : return 'twenty three' if n ==", "(terbilang(n % 1_000_000) if n % 1_000_000 != 0 else '') elif n", "if n % 1_000_000 != 0 else '') elif n >= 1_000 :", "== 29 : return 'twenty nine' if n == 30 : return 'thirty'", "n == 11 : return 'eleven' elif n == 12 : return 'twelve'", "= ['', 'one', 'two', 'tree', 'four', 'five', 'six', 'seven', 'eight', 'nine'] def terbilang", "' thouosand ' + (terbilang(n % 1_000) if n % 1_000 !=0 else", "os kata = ['', 'one', 'two', 'tree', 'four', 'five', 'six', 'seven', 'eight', 'nine']", ">= 100 : if n // 100 == 1 : return 'one hundred'", "else : return terbilang (n % 10) + 'teen' while True : os.system('cls')", "% 100 != 0 else '') else : return terbilang (n // 100)", "seven' if n == 28 : return 'twenty eight' if n == 29", "// 1_000_000) + ' million ' + (terbilang(n % 1_000_000) if n %", "20 : if n == 21 : return 'twenty one' if n ==", "elif n >= 100 : if n // 100 == 1 : return", "n >= 1_000 : if n // 1_000 == 1 : return 'one", "== 30 : return 'thirty' if n == 50 : return 'fifty' return", ">= 1_000_000 : return terbilang (n // 1_000_000) + ' million ' +", "1_000) + ' thouosand ' + (terbilang(n % 1_000) if n % 1_000", "% 1_000_000 != 0 else '') elif n >= 1_000 : if n", "else '') else : return terbilang (n // 100) + 'hundred' + 
(terbilang(n", "1_000 == 1 : return 'one thousand' + (terbilang(n % 1_000) if n", "kata = ['', 'one', 'two', 'tree', 'four', 'five', 'six', 'seven', 'eight', 'nine'] def", "1_000) if n % 1_000 !=0 else '') elif n >= 100 :", "10) if n % 10 !=0 else '') else : if n ==", "'hundred' + (terbilang(n % 100) if n % 100 !=0 else '') elif", "nine' if n == 30 : return 'thirty' if n == 50 :", "return terbilang (n // 10) +'ty'+ (terbilang(n % 10) if n % 10", "terbilang (n // 100) + 'hundred' + (terbilang(n % 100) if n %", "'six', 'seven', 'eight', 'nine'] def terbilang (n) : if n < 10 :", "(terbilang(n % 1_000_000_000) if n % 1_000_000_000 != 0 else '') elif n", ": if n == 21 : return 'twenty one' if n == 22", "else '') elif n >= 100 : if n // 100 == 1", "billion ' + (terbilang(n % 1_000_000_000) if n % 1_000_000_000 != 0 else", "(n) : if n < 10 : return kata [n] elif n >=", "100 != 0 else '') else : return terbilang (n // 100) +", ": if n // 100 == 1 : return 'one hundred' + (terbilang(n", "100) if n % 100 !=0 else '') elif n >= 20 :", "' + (terbilang(n % 1_000_000_000) if n % 1_000_000_000 != 0 else '')", "n >= 100 : if n // 100 == 1 : return 'one", "return 'twenty seven' if n == 28 : return 'twenty eight' if n", "100) if n % 100 != 0 else '') else : return terbilang", "'') elif n >= 20 : if n == 21 : return 'twenty", "'tree', 'four', 'five', 'six', 'seven', 'eight', 'nine'] def terbilang (n) : if n", "n == 10 : return 'ten' elif n == 11 : return 'eleven'", "' million ' + (terbilang(n % 1_000_000) if n % 1_000_000 != 0", "n == 26 : return 'twenty six' if n == 27 : return", "n == 23 : return 'twenty three' if n == 24 : return", "return 'twenty nine' if n == 30 : return 'thirty' if n ==", ": n = int(input('Number ? ')) print (f'{n:,} = {terbilang(n)}') except : print('ty", "14 : return 'fourteen' elif n == 15 : return 'fifteen' else :", "// 1_000_000_000) + ' billion ' + (terbilang(n % 1_000_000_000) if n %", "% 10) + 'teen' while True : os.system('cls') try : n = int(input('Number", "1_000_000_000 != 0 else '') elif n >= 1_000_000 : return terbilang (n", "elif n == 13 : return 'thirteen' elif n == 14 : return", "n % 1_000 !=0 else '') elif n >= 100 : if n", ": return 'twenty seven' if n == 28 : return 'twenty eight' if", "thousand' + (terbilang(n % 1_000) if n % 1_000 != 0 else '')", "== 21 : return 'twenty one' if n == 22 : return 'twenty", "if n == 29 : return 'twenty nine' if n == 30 :", "if n == 21 : return 'twenty one' if n == 22 :", "26 : return 'twenty six' if n == 27 : return 'twenty seven'", "== 11 : return 'eleven' elif n == 12 : return 'twelve' elif", "% 100) if n % 100 != 0 else '') else : return", "n // 1_000 == 1 : return 'one thousand' + (terbilang(n % 1_000)", "10) + 'teen' while True : os.system('cls') try : n = int(input('Number ?", "(n // 1_000_000) + ' million ' + (terbilang(n % 1_000_000) if n", ": return 'twenty nine' if n == 30 : return 'thirty' if n", ": return 'twenty two' if n == 23 : return 'twenty three' if", "== 25 : return 'twenty five' if n == 26 : return 'twenty", "== 26 : return 'twenty six' if n == 27 : return 'twenty", "30 : return 'thirty' if n == 50 : return 'fifty' return terbilang", "terbilang (n // 10) +'ty'+ (terbilang(n % 10) if n % 10 !=0", "return 'eleven' elif n == 12 : return 'twelve' elif n == 13", "n % 100 != 0 else '') else : return terbilang (n //", "== 12 : return 'twelve' elif n == 13 : return 'thirteen' elif", "if n % 1_000_000_000 != 0 else '') elif n >= 1_000_000 :", "return terbilang (n // 1_000_000_000) + ' billion ' + (terbilang(n % 
1_000_000_000)", "n >= 1_000_000_000 : return terbilang (n // 1_000_000_000) + ' billion '", "while True : os.system('cls') try : n = int(input('Number ? ')) print (f'{n:,}", "0 else '') else : return terbilang (n // 1_000) + ' thouosand", "if n % 10 !=0 else '') else : if n == 10", "(n // 1_000) + ' thouosand ' + (terbilang(n % 1_000) if n", "return 'twenty three' if n == 24 : return 'twenty four' if n", "'twenty four' if n == 25 : return 'twenty five' if n ==", ": return 'twenty eight' if n == 29 : return 'twenty nine' if", "!= 0 else '') elif n >= 1_000 : if n // 1_000", "1_000 != 0 else '') else : return terbilang (n // 1_000) +", "'thirty' if n == 50 : return 'fifty' return terbilang (n // 10)", "== 22 : return 'twenty two' if n == 23 : return 'twenty", "terbilang (n // 1_000_000) + ' million ' + (terbilang(n % 1_000_000) if", "six' if n == 27 : return 'twenty seven' if n == 28", "== 10 : return 'ten' elif n == 11 : return 'eleven' elif", "// 1_000 == 1 : return 'one thousand' + (terbilang(n % 1_000) if", "return 'thirteen' elif n == 14 : return 'fourteen' elif n == 15", "n == 13 : return 'thirteen' elif n == 14 : return 'fourteen'", "// 1_000) + ' thouosand ' + (terbilang(n % 1_000) if n %", "[n] elif n >= 1_000_000_000 : return terbilang (n // 1_000_000_000) + '", "'twenty one' if n == 22 : return 'twenty two' if n ==", ": return 'thirteen' elif n == 14 : return 'fourteen' elif n ==", "return 'fifteen' else : return terbilang (n % 10) + 'teen' while True", "== 24 : return 'twenty four' if n == 25 : return 'twenty", "(n % 10) + 'teen' while True : os.system('cls') try : n =", "n < 10 : return kata [n] elif n >= 1_000_000_000 : return", "n >= 1_000_000 : return terbilang (n // 1_000_000) + ' million '", "if n // 1_000 == 1 : return 'one thousand' + (terbilang(n %", "True : os.system('cls') try : n = int(input('Number ? 
')) print (f'{n:,} =", "% 1_000) if n % 1_000 != 0 else '') else : return", "// 100 == 1 : return 'one hundred' + (terbilang(n % 100) if", "'nine'] def terbilang (n) : if n < 10 : return kata [n]", "n // 100 == 1 : return 'one hundred' + (terbilang(n % 100)", "!=0 else '') elif n >= 100 : if n // 100 ==", "% 1_000 != 0 else '') else : return terbilang (n // 1_000)", "100 !=0 else '') elif n >= 20 : if n == 21", ": return terbilang (n // 1_000_000_000) + ' billion ' + (terbilang(n %", ": return kata [n] elif n >= 1_000_000_000 : return terbilang (n //", "'twenty nine' if n == 30 : return 'thirty' if n == 50", "1_000_000) if n % 1_000_000 != 0 else '') elif n >= 1_000", "15 : return 'fifteen' else : return terbilang (n % 10) + 'teen'", "'twenty six' if n == 27 : return 'twenty seven' if n ==", "% 100) if n % 100 !=0 else '') elif n >= 20", "' + (terbilang(n % 1_000) if n % 1_000 !=0 else '') elif", "if n % 100 != 0 else '') else : return terbilang (n", "23 : return 'twenty three' if n == 24 : return 'twenty four'", "elif n >= 1_000_000_000 : return terbilang (n // 1_000_000_000) + ' billion", "four' if n == 25 : return 'twenty five' if n == 26", "'one hundred' + (terbilang(n % 100) if n % 100 != 0 else", "import os kata = ['', 'one', 'two', 'tree', 'four', 'five', 'six', 'seven', 'eight',", "'fourteen' elif n == 15 : return 'fifteen' else : return terbilang (n", "if n == 22 : return 'twenty two' if n == 23 :", "return terbilang (n // 100) + 'hundred' + (terbilang(n % 100) if n", "+ (terbilang(n % 1_000_000) if n % 1_000_000 != 0 else '') elif", ": return 'twenty five' if n == 26 : return 'twenty six' if", ": return 'fifty' return terbilang (n // 10) +'ty'+ (terbilang(n % 10) if", "return terbilang (n // 1_000) + ' thouosand ' + (terbilang(n % 1_000)", "'one thousand' + (terbilang(n % 1_000) if n % 1_000 != 0 else", "return 'twenty one' if n == 22 : return 'twenty two' if n", "== 13 : return 'thirteen' elif n == 14 : return 'fourteen' elif", "else : return terbilang (n // 1_000) + ' thouosand ' + (terbilang(n", "else : return terbilang (n // 100) + 'hundred' + (terbilang(n % 100)", "n == 30 : return 'thirty' if n == 50 : return 'fifty'", "// 10) +'ty'+ (terbilang(n % 10) if n % 10 !=0 else '')", ": return terbilang (n // 1_000) + ' thouosand ' + (terbilang(n %", "+ 'teen' while True : os.system('cls') try : n = int(input('Number ? 
'))", "n == 21 : return 'twenty one' if n == 22 : return", "+ (terbilang(n % 1_000_000_000) if n % 1_000_000_000 != 0 else '') elif", "two' if n == 23 : return 'twenty three' if n == 24", "elif n >= 1_000_000 : return terbilang (n // 1_000_000) + ' million", ": return 'ten' elif n == 11 : return 'eleven' elif n ==", "'thirteen' elif n == 14 : return 'fourteen' elif n == 15 :", "return 'twelve' elif n == 13 : return 'thirteen' elif n == 14", ": return terbilang (n % 10) + 'teen' while True : os.system('cls') try", "n % 1_000_000_000 != 0 else '') elif n >= 1_000_000 : return", "n == 29 : return 'twenty nine' if n == 30 : return", "n % 1_000 != 0 else '') else : return terbilang (n //", ": return terbilang (n // 1_000_000) + ' million ' + (terbilang(n %", "== 50 : return 'fifty' return terbilang (n // 10) +'ty'+ (terbilang(n %", "'five', 'six', 'seven', 'eight', 'nine'] def terbilang (n) : if n < 10", "n >= 20 : if n == 21 : return 'twenty one' if", ": return 'twenty one' if n == 22 : return 'twenty two' if", "terbilang (n % 10) + 'teen' while True : os.system('cls') try : n", "<reponame>Ellenn01/Tugas-pertemuan-12 import os kata = ['', 'one', 'two', 'tree', 'four', 'five', 'six', 'seven',", "n == 27 : return 'twenty seven' if n == 28 : return", "return terbilang (n % 10) + 'teen' while True : os.system('cls') try :", "eight' if n == 29 : return 'twenty nine' if n == 30", "return terbilang (n // 1_000_000) + ' million ' + (terbilang(n % 1_000_000)", "+ (terbilang(n % 100) if n % 100 !=0 else '') elif n", "1_000 : if n // 1_000 == 1 : return 'one thousand' +", "'eight', 'nine'] def terbilang (n) : if n < 10 : return kata", "else : if n == 10 : return 'ten' elif n == 11", "elif n == 11 : return 'eleven' elif n == 12 : return", "(n // 100) + 'hundred' + (terbilang(n % 100) if n % 100", "if n % 100 !=0 else '') elif n >= 20 : if", "kata [n] elif n >= 1_000_000_000 : return terbilang (n // 1_000_000_000) +", ": return 'twenty three' if n == 24 : return 'twenty four' if", "'seven', 'eight', 'nine'] def terbilang (n) : if n < 10 : return", "(terbilang(n % 10) if n % 10 !=0 else '') else : if", "n % 10 !=0 else '') else : if n == 10 :", "'fifty' return terbilang (n // 10) +'ty'+ (terbilang(n % 10) if n %", "1 : return 'one thousand' + (terbilang(n % 1_000) if n % 1_000", "if n == 28 : return 'twenty eight' if n == 29 :", "n % 1_000_000 != 0 else '') elif n >= 1_000 : if", "n % 100 !=0 else '') elif n >= 20 : if n", "27 : return 'twenty seven' if n == 28 : return 'twenty eight'", "1 : return 'one hundred' + (terbilang(n % 100) if n % 100", "'eleven' elif n == 12 : return 'twelve' elif n == 13 :", "if n == 30 : return 'thirty' if n == 50 : return", "1_000_000_000) if n % 1_000_000_000 != 0 else '') elif n >= 1_000_000", "== 15 : return 'fifteen' else : return terbilang (n % 10) +", "if n == 10 : return 'ten' elif n == 11 : return", "return 'one thousand' + (terbilang(n % 1_000) if n % 1_000 != 0", "+ (terbilang(n % 1_000) if n % 1_000 !=0 else '') elif n", "'') else : return terbilang (n // 100) + 'hundred' + (terbilang(n %", "0 else '') elif n >= 1_000 : if n // 1_000 ==", "'twenty eight' if n == 29 : return 'twenty nine' if n ==", "return 'one hundred' + (terbilang(n % 100) if n % 100 != 0", "def terbilang (n) : if n < 10 : return kata [n] elif", "if n == 27 : return 'twenty seven' if n == 28 :", "n == 50 : return 'fifty' return terbilang (n // 10) +'ty'+ (terbilang(n", "== 1 : return 'one hundred' + (terbilang(n % 100) if n %", ": return 'one hundred' + (terbilang(n % 100) if n % 100 !=", "return 
'twenty eight' if n == 29 : return 'twenty nine' if n", "100) + 'hundred' + (terbilang(n % 100) if n % 100 !=0 else", "if n == 50 : return 'fifty' return terbilang (n // 10) +'ty'+", ": return 'eleven' elif n == 12 : return 'twelve' elif n ==", "n == 22 : return 'twenty two' if n == 23 : return", "return 'twenty four' if n == 25 : return 'twenty five' if n", ": if n == 10 : return 'ten' elif n == 11 :", "else '') elif n >= 1_000 : if n // 1_000 == 1", "1_000_000_000) + ' billion ' + (terbilang(n % 1_000_000_000) if n % 1_000_000_000", "three' if n == 24 : return 'twenty four' if n == 25", "n == 24 : return 'twenty four' if n == 25 : return", "return 'ten' elif n == 11 : return 'eleven' elif n == 12", "if n == 23 : return 'twenty three' if n == 24 :", ": return 'fifteen' else : return terbilang (n % 10) + 'teen' while", "% 1_000 !=0 else '') elif n >= 100 : if n //", ">= 1_000_000_000 : return terbilang (n // 1_000_000_000) + ' billion ' +", ": return 'twenty four' if n == 25 : return 'twenty five' if", "'ten' elif n == 11 : return 'eleven' elif n == 12 :", ": return 'thirty' if n == 50 : return 'fifty' return terbilang (n", "50 : return 'fifty' return terbilang (n // 10) +'ty'+ (terbilang(n % 10)", "if n == 26 : return 'twenty six' if n == 27 :", "(terbilang(n % 100) if n % 100 !=0 else '') elif n >=", "terbilang (n // 1_000_000_000) + ' billion ' + (terbilang(n % 1_000_000_000) if", ": return 'twenty six' if n == 27 : return 'twenty seven' if", "' + (terbilang(n % 1_000_000) if n % 1_000_000 != 0 else '')", "hundred' + (terbilang(n % 100) if n % 100 != 0 else '')", "13 : return 'thirteen' elif n == 14 : return 'fourteen' elif n", "n == 12 : return 'twelve' elif n == 13 : return 'thirteen'", "!=0 else '') elif n >= 20 : if n == 21 :", "== 1 : return 'one thousand' + (terbilang(n % 1_000) if n %", "10 : return kata [n] elif n >= 1_000_000_000 : return terbilang (n", "== 27 : return 'twenty seven' if n == 28 : return 'twenty", "== 28 : return 'twenty eight' if n == 29 : return 'twenty", "!= 0 else '') else : return terbilang (n // 1_000) + '", "% 10 !=0 else '') else : if n == 10 : return", "= int(input('Number ? ')) print (f'{n:,} = {terbilang(n)}') except : print('ty again ...')", "'twelve' elif n == 13 : return 'thirteen' elif n == 14 :", "else '') elif n >= 1_000_000 : return terbilang (n // 1_000_000) +", "(terbilang(n % 1_000) if n % 1_000 !=0 else '') elif n >=", "% 100 !=0 else '') elif n >= 20 : if n ==", "0 else '') elif n >= 1_000_000 : return terbilang (n // 1_000_000)", "28 : return 'twenty eight' if n == 29 : return 'twenty nine'", "return 'twenty six' if n == 27 : return 'twenty seven' if n", "100 == 1 : return 'one hundred' + (terbilang(n % 100) if n", "return kata [n] elif n >= 1_000_000_000 : return terbilang (n // 1_000_000_000)", ": return terbilang (n // 100) + 'hundred' + (terbilang(n % 100) if", "% 1_000) if n % 1_000 !=0 else '') elif n >= 100", ": os.system('cls') try : n = int(input('Number ? ')) print (f'{n:,} = {terbilang(n)}')", "elif n == 12 : return 'twelve' elif n == 13 : return", "'') else : return terbilang (n // 1_000) + ' thouosand ' +", "(terbilang(n % 100) if n % 100 != 0 else '') else :", "1_000_000 != 0 else '') elif n >= 1_000 : if n //", "'twenty three' if n == 24 : return 'twenty four' if n ==", "if n % 1_000 != 0 else '') else : return terbilang (n", "// 100) + 'hundred' + (terbilang(n % 100) if n % 100 !=0", "int(input('Number ? 
')) print (f'{n:,} = {terbilang(n)}') except : print('ty again ...') os.system('pause')", "1_000) if n % 1_000 != 0 else '') else : return terbilang", ": return 'one thousand' + (terbilang(n % 1_000) if n % 1_000 !=", "'one', 'two', 'tree', 'four', 'five', 'six', 'seven', 'eight', 'nine'] def terbilang (n) :" ]
[ "if child.tag == \"nd\": way.add_node(int(child.attrib['ref'])) elif child.tag == \"tag\": key = child.attrib['k'] val", "nodes[node_id] = node def parse_osm_file(filename): tree = et.parse(filename) roads = [] nodes =", "None): way.name = val elif key == \"oneway\": way.is_one_way = val == \"yes\"", "nodes = dict() for item in tree.iter(): if item.tag == \"node\": extract_node(item, nodes)", "== \"node\": extract_node(item, nodes) elif item.tag == \"way\": extract_road(item, roads) return roads, nodes", "roads, nodes if __name__ == \"__main__\": roads, nodes = parse_osm_file(\"../osm_birmingham.xml\") print(str(len(nodes)) + \"", "== \"tag\": key = child.attrib['k'] val = child.attrib['v'] if key == \"name\" or", "= child.attrib['k'] val = child.attrib['v'] if child.tag == \"tag\": node.add_tag(key, val) nodes[node_id] =", "node def parse_osm_file(filename): tree = et.parse(filename) roads = [] nodes = dict() for", "item in tree.iter(): if item.tag == \"node\": extract_node(item, nodes) elif item.tag == \"way\":", "and is_highway: roads.append(way) def extract_node(item, nodes): node_id = int(item.attrib['id']) node_lat = float(item.attrib['lat']) node_lon", "[] nodes = dict() for item in tree.iter(): if item.tag == \"node\": extract_node(item,", "print(str(len(nodes)) + \" nodes in dataset\") print(str(len(roads)) + \" roads in dataset\") pass", "tree.iter(): if item.tag == \"node\": extract_node(item, nodes) elif item.tag == \"way\": extract_road(item, roads)", "None and is_highway: roads.append(way) def extract_node(item, nodes): node_id = int(item.attrib['id']) node_lat = float(item.attrib['lat'])", "key == \"highway\": is_highway = True if way.name is not None and is_highway:", "is None): way.name = val elif key == \"oneway\": way.is_one_way = val ==", "item.tag == \"node\": extract_node(item, nodes) elif item.tag == \"way\": extract_road(item, roads) return roads,", "in item: key = child.attrib['k'] val = child.attrib['v'] if child.tag == \"tag\": node.add_tag(key,", "not None and is_highway: roads.append(way) def extract_node(item, nodes): node_id = int(item.attrib['id']) node_lat =", "tree = et.parse(filename) roads = [] nodes = dict() for item in tree.iter():", "objects.node import Node from objects.way import Way def extract_road(item, roads): way_id = int(item.attrib['id'])", "== \"oneway\": way.is_one_way = val == \"yes\" elif key == \"highway\": is_highway =", "dict() for item in tree.iter(): if item.tag == \"node\": extract_node(item, nodes) elif item.tag", "extract_road(item, roads): way_id = int(item.attrib['id']) way = Way(way_id) is_highway = False for child", "== \"nd\": way.add_node(int(child.attrib['ref'])) elif child.tag == \"tag\": key = child.attrib['k'] val = child.attrib['v']", "for item in tree.iter(): if item.tag == \"node\": extract_node(item, nodes) elif item.tag ==", "import Way def extract_road(item, roads): way_id = int(item.attrib['id']) way = Way(way_id) is_highway =", "= Node(node_id, node_lat, node_lon) for child in item: key = child.attrib['k'] val =", "extract_node(item, nodes) elif item.tag == \"way\": extract_road(item, roads) return roads, nodes if __name__", "float(item.attrib['lon']) node = Node(node_id, node_lat, node_lon) for child in item: key = child.attrib['k']", "= val elif key == \"oneway\": way.is_one_way = val == \"yes\" elif key", "int(item.attrib['id']) node_lat = float(item.attrib['lat']) node_lon = float(item.attrib['lon']) node = Node(node_id, node_lat, node_lon) for", "def extract_node(item, nodes): node_id = 
int(item.attrib['id']) node_lat = float(item.attrib['lat']) node_lon = float(item.attrib['lon']) node", "Node(node_id, node_lat, node_lon) for child in item: key = child.attrib['k'] val = child.attrib['v']", "return roads, nodes if __name__ == \"__main__\": roads, nodes = parse_osm_file(\"../osm_birmingham.xml\") print(str(len(nodes)) +", "\"oneway\": way.is_one_way = val == \"yes\" elif key == \"highway\": is_highway = True", "way = Way(way_id) is_highway = False for child in item: if child.tag ==", "child.attrib['v'] if child.tag == \"tag\": node.add_tag(key, val) nodes[node_id] = node def parse_osm_file(filename): tree", "val = child.attrib['v'] if child.tag == \"tag\": node.add_tag(key, val) nodes[node_id] = node def", "val) nodes[node_id] = node def parse_osm_file(filename): tree = et.parse(filename) roads = [] nodes", "parse_osm_file(filename): tree = et.parse(filename) roads = [] nodes = dict() for item in", "== \"name\" or (key == \"ref\" and way.name is None): way.name = val", "node_lat, node_lon) for child in item: key = child.attrib['k'] val = child.attrib['v'] if", "nodes = parse_osm_file(\"../osm_birmingham.xml\") print(str(len(nodes)) + \" nodes in dataset\") print(str(len(roads)) + \" roads", "elif item.tag == \"way\": extract_road(item, roads) return roads, nodes if __name__ == \"__main__\":", "__name__ == \"__main__\": roads, nodes = parse_osm_file(\"../osm_birmingham.xml\") print(str(len(nodes)) + \" nodes in dataset\")", "= int(item.attrib['id']) node_lat = float(item.attrib['lat']) node_lon = float(item.attrib['lon']) node = Node(node_id, node_lat, node_lon)", "= child.attrib['k'] val = child.attrib['v'] if key == \"name\" or (key == \"ref\"", "\"yes\" elif key == \"highway\": is_highway = True if way.name is not None", "if __name__ == \"__main__\": roads, nodes = parse_osm_file(\"../osm_birmingham.xml\") print(str(len(nodes)) + \" nodes in", "objects.way import Way def extract_road(item, roads): way_id = int(item.attrib['id']) way = Way(way_id) is_highway", "import xml.etree.ElementTree as et from objects.node import Node from objects.way import Way def", "elif key == \"oneway\": way.is_one_way = val == \"yes\" elif key == \"highway\":", "et.parse(filename) roads = [] nodes = dict() for item in tree.iter(): if item.tag", "nodes) elif item.tag == \"way\": extract_road(item, roads) return roads, nodes if __name__ ==", "way.name = val elif key == \"oneway\": way.is_one_way = val == \"yes\" elif", "= Way(way_id) is_highway = False for child in item: if child.tag == \"nd\":", "roads) return roads, nodes if __name__ == \"__main__\": roads, nodes = parse_osm_file(\"../osm_birmingham.xml\") print(str(len(nodes))", "roads.append(way) def extract_node(item, nodes): node_id = int(item.attrib['id']) node_lat = float(item.attrib['lat']) node_lon = float(item.attrib['lon'])", "= val == \"yes\" elif key == \"highway\": is_highway = True if way.name", "and way.name is None): way.name = val elif key == \"oneway\": way.is_one_way =", "extract_road(item, roads) return roads, nodes if __name__ == \"__main__\": roads, nodes = parse_osm_file(\"../osm_birmingham.xml\")", "way.name is None): way.name = val elif key == \"oneway\": way.is_one_way = val", "= et.parse(filename) roads = [] nodes = dict() for item in tree.iter(): if", "nodes if __name__ == \"__main__\": roads, nodes = parse_osm_file(\"../osm_birmingham.xml\") print(str(len(nodes)) + \" nodes", "== \"tag\": node.add_tag(key, val) nodes[node_id] = node def parse_osm_file(filename): tree = et.parse(filename) roads", "or (key == 
\"ref\" and way.name is None): way.name = val elif key", "for child in item: if child.tag == \"nd\": way.add_node(int(child.attrib['ref'])) elif child.tag == \"tag\":", "if child.tag == \"tag\": node.add_tag(key, val) nodes[node_id] = node def parse_osm_file(filename): tree =", "= float(item.attrib['lat']) node_lon = float(item.attrib['lon']) node = Node(node_id, node_lat, node_lon) for child in", "== \"ref\" and way.name is None): way.name = val elif key == \"oneway\":", "\"tag\": key = child.attrib['k'] val = child.attrib['v'] if key == \"name\" or (key", "Way def extract_road(item, roads): way_id = int(item.attrib['id']) way = Way(way_id) is_highway = False", "key = child.attrib['k'] val = child.attrib['v'] if child.tag == \"tag\": node.add_tag(key, val) nodes[node_id]", "if item.tag == \"node\": extract_node(item, nodes) elif item.tag == \"way\": extract_road(item, roads) return", "val == \"yes\" elif key == \"highway\": is_highway = True if way.name is", "is_highway = False for child in item: if child.tag == \"nd\": way.add_node(int(child.attrib['ref'])) elif", "nodes): node_id = int(item.attrib['id']) node_lat = float(item.attrib['lat']) node_lon = float(item.attrib['lon']) node = Node(node_id,", "Way(way_id) is_highway = False for child in item: if child.tag == \"nd\": way.add_node(int(child.attrib['ref']))", "from objects.way import Way def extract_road(item, roads): way_id = int(item.attrib['id']) way = Way(way_id)", "extract_node(item, nodes): node_id = int(item.attrib['id']) node_lat = float(item.attrib['lat']) node_lon = float(item.attrib['lon']) node =", "child in item: key = child.attrib['k'] val = child.attrib['v'] if child.tag == \"tag\":", "roads = [] nodes = dict() for item in tree.iter(): if item.tag ==", "= True if way.name is not None and is_highway: roads.append(way) def extract_node(item, nodes):", "xml.etree.ElementTree as et from objects.node import Node from objects.way import Way def extract_road(item,", "\"nd\": way.add_node(int(child.attrib['ref'])) elif child.tag == \"tag\": key = child.attrib['k'] val = child.attrib['v'] if", "= [] nodes = dict() for item in tree.iter(): if item.tag == \"node\":", "node_lon = float(item.attrib['lon']) node = Node(node_id, node_lat, node_lon) for child in item: key", "child.attrib['k'] val = child.attrib['v'] if child.tag == \"tag\": node.add_tag(key, val) nodes[node_id] = node", "is not None and is_highway: roads.append(way) def extract_node(item, nodes): node_id = int(item.attrib['id']) node_lat", "\"__main__\": roads, nodes = parse_osm_file(\"../osm_birmingham.xml\") print(str(len(nodes)) + \" nodes in dataset\") print(str(len(roads)) +", "key == \"oneway\": way.is_one_way = val == \"yes\" elif key == \"highway\": is_highway", "True if way.name is not None and is_highway: roads.append(way) def extract_node(item, nodes): node_id", "= dict() for item in tree.iter(): if item.tag == \"node\": extract_node(item, nodes) elif", "for child in item: key = child.attrib['k'] val = child.attrib['v'] if child.tag ==", "node_lat = float(item.attrib['lat']) node_lon = float(item.attrib['lon']) node = Node(node_id, node_lat, node_lon) for child", "key == \"name\" or (key == \"ref\" and way.name is None): way.name =", "roads, nodes = parse_osm_file(\"../osm_birmingham.xml\") print(str(len(nodes)) + \" nodes in dataset\") print(str(len(roads)) + \"", "item: if child.tag == \"nd\": way.add_node(int(child.attrib['ref'])) elif child.tag == \"tag\": key = child.attrib['k']", "as et from objects.node import Node from objects.way import Way def 
extract_road(item, roads):", "val elif key == \"oneway\": way.is_one_way = val == \"yes\" elif key ==", "\"highway\": is_highway = True if way.name is not None and is_highway: roads.append(way) def", "= child.attrib['v'] if child.tag == \"tag\": node.add_tag(key, val) nodes[node_id] = node def parse_osm_file(filename):", "way.add_node(int(child.attrib['ref'])) elif child.tag == \"tag\": key = child.attrib['k'] val = child.attrib['v'] if key", "def parse_osm_file(filename): tree = et.parse(filename) roads = [] nodes = dict() for item", "def extract_road(item, roads): way_id = int(item.attrib['id']) way = Way(way_id) is_highway = False for", "= int(item.attrib['id']) way = Way(way_id) is_highway = False for child in item: if", "child.tag == \"tag\": key = child.attrib['k'] val = child.attrib['v'] if key == \"name\"", "node.add_tag(key, val) nodes[node_id] = node def parse_osm_file(filename): tree = et.parse(filename) roads = []", "Node from objects.way import Way def extract_road(item, roads): way_id = int(item.attrib['id']) way =", "et from objects.node import Node from objects.way import Way def extract_road(item, roads): way_id", "roads): way_id = int(item.attrib['id']) way = Way(way_id) is_highway = False for child in", "child.attrib['v'] if key == \"name\" or (key == \"ref\" and way.name is None):", "from objects.node import Node from objects.way import Way def extract_road(item, roads): way_id =", "int(item.attrib['id']) way = Way(way_id) is_highway = False for child in item: if child.tag", "float(item.attrib['lat']) node_lon = float(item.attrib['lon']) node = Node(node_id, node_lat, node_lon) for child in item:", "== \"__main__\": roads, nodes = parse_osm_file(\"../osm_birmingham.xml\") print(str(len(nodes)) + \" nodes in dataset\") print(str(len(roads))", "node_id = int(item.attrib['id']) node_lat = float(item.attrib['lat']) node_lon = float(item.attrib['lon']) node = Node(node_id, node_lat,", "elif key == \"highway\": is_highway = True if way.name is not None and", "child.attrib['k'] val = child.attrib['v'] if key == \"name\" or (key == \"ref\" and", "child.tag == \"tag\": node.add_tag(key, val) nodes[node_id] = node def parse_osm_file(filename): tree = et.parse(filename)", "item: key = child.attrib['k'] val = child.attrib['v'] if child.tag == \"tag\": node.add_tag(key, val)", "node = Node(node_id, node_lat, node_lon) for child in item: key = child.attrib['k'] val", "\"way\": extract_road(item, roads) return roads, nodes if __name__ == \"__main__\": roads, nodes =", "\"tag\": node.add_tag(key, val) nodes[node_id] = node def parse_osm_file(filename): tree = et.parse(filename) roads =", "\"ref\" and way.name is None): way.name = val elif key == \"oneway\": way.is_one_way", "is_highway: roads.append(way) def extract_node(item, nodes): node_id = int(item.attrib['id']) node_lat = float(item.attrib['lat']) node_lon =", "(key == \"ref\" and way.name is None): way.name = val elif key ==", "\"name\" or (key == \"ref\" and way.name is None): way.name = val elif", "way.name is not None and is_highway: roads.append(way) def extract_node(item, nodes): node_id = int(item.attrib['id'])", "== \"way\": extract_road(item, roads) return roads, nodes if __name__ == \"__main__\": roads, nodes", "= node def parse_osm_file(filename): tree = et.parse(filename) roads = [] nodes = dict()", "= child.attrib['v'] if key == \"name\" or (key == \"ref\" and way.name is", "node_lon) for child in item: key = child.attrib['k'] val = child.attrib['v'] if child.tag", "== \"yes\" elif key == \"highway\": is_highway = 
True if way.name is not", "= parse_osm_file(\"../osm_birmingham.xml\") print(str(len(nodes)) + \" nodes in dataset\") print(str(len(roads)) + \" roads in", "way.is_one_way = val == \"yes\" elif key == \"highway\": is_highway = True if", "way_id = int(item.attrib['id']) way = Way(way_id) is_highway = False for child in item:", "in item: if child.tag == \"nd\": way.add_node(int(child.attrib['ref'])) elif child.tag == \"tag\": key =", "key = child.attrib['k'] val = child.attrib['v'] if key == \"name\" or (key ==", "parse_osm_file(\"../osm_birmingham.xml\") print(str(len(nodes)) + \" nodes in dataset\") print(str(len(roads)) + \" roads in dataset\")", "item.tag == \"way\": extract_road(item, roads) return roads, nodes if __name__ == \"__main__\": roads,", "if way.name is not None and is_highway: roads.append(way) def extract_node(item, nodes): node_id =", "\"node\": extract_node(item, nodes) elif item.tag == \"way\": extract_road(item, roads) return roads, nodes if", "is_highway = True if way.name is not None and is_highway: roads.append(way) def extract_node(item,", "= float(item.attrib['lon']) node = Node(node_id, node_lat, node_lon) for child in item: key =", "== \"highway\": is_highway = True if way.name is not None and is_highway: roads.append(way)", "import Node from objects.way import Way def extract_road(item, roads): way_id = int(item.attrib['id']) way", "child in item: if child.tag == \"nd\": way.add_node(int(child.attrib['ref'])) elif child.tag == \"tag\": key", "= False for child in item: if child.tag == \"nd\": way.add_node(int(child.attrib['ref'])) elif child.tag", "False for child in item: if child.tag == \"nd\": way.add_node(int(child.attrib['ref'])) elif child.tag ==", "val = child.attrib['v'] if key == \"name\" or (key == \"ref\" and way.name", "elif child.tag == \"tag\": key = child.attrib['k'] val = child.attrib['v'] if key ==", "if key == \"name\" or (key == \"ref\" and way.name is None): way.name", "in tree.iter(): if item.tag == \"node\": extract_node(item, nodes) elif item.tag == \"way\": extract_road(item,", "child.tag == \"nd\": way.add_node(int(child.attrib['ref'])) elif child.tag == \"tag\": key = child.attrib['k'] val =" ]
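# The parser above only assumes a small interface from objects.node and
# objects.way.  A minimal sketch of classes that would satisfy it -- these are
# illustrative stand-ins, not the project's actual definitions:
class Node:
    def __init__(self, node_id, lat, lon):
        self.node_id = node_id
        self.lat = lat
        self.lon = lon
        self.tags = {}

    def add_tag(self, key, val):
        self.tags[key] = val


class Way:
    def __init__(self, way_id):
        self.way_id = way_id
        self.name = None          # read before assignment in extract_road
        self.is_one_way = False
        self.node_ids = []

    def add_node(self, node_id):
        self.node_ids.append(node_id)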
[ "- alpha * dz db = (1.0 / m) * np.sum(error_martrix) b =", "y def loss(forcast, real): return -(real * math.log(forcast) + (1-real)*log(1-forcast)) def sigmoidOnEle(matrix): m,n", "for i in range(maxCycles): # this is Z set() z = np.dot(dataSetMat, weights)+b", "* np.dot(dataSetMat.transpose(),error_martrix) weights = weights - alpha * dz db = (1.0 /", "in range(m): rows = [] for j in range(n): sig = sigmoid(matrix[i,j]) rows.append(sig)", "j in range(n): sig = sigmoid(matrix[i,j]) rows.append(sig) arr.append(rows) return mat(arr) \"\"\" alpha: learning", "martix_A's columns equals matrix_B's rows \"\"\" def linearReg(dataSet, labels): dataSetMat = mat(dataSet) labelMat", "dataSetMat = mat(dataSet) labelMat = mat(labels).transpose() alpha = 0.001 maxCycles = 1000 m,n", "set() z = np.dot(dataSetMat, weights)+b # this is A set() y_hat = sigmoidOnEle(z)", "-(real * math.log(forcast) + (1-real)*log(1-forcast)) def sigmoidOnEle(matrix): m,n = shape(matrix) arr = []", "weights)+b # this is A set() y_hat = sigmoidOnEle(z) error_martrix = y_hat -", "# this is A set() y_hat = sigmoidOnEle(z) error_martrix = y_hat - labelMat", "math import sys def loadDataSet(filename): f = open(filename) lines = f.readlines() dataSet =", "= shape(matrix) arr = [] for i in range(m): rows = [] for", "[] for i in range(m): rows = [] for j in range(n): sig", "arr.append(rows) return mat(arr) \"\"\" alpha: learning rate is 0.001 maxCycle: loop times is", "= mat(dataSet) labelMat = mat(labels).transpose() alpha = 0.001 maxCycles = 1000 m,n =", "real): return -(real * math.log(forcast) + (1-real)*log(1-forcast)) def sigmoidOnEle(matrix): m,n = shape(matrix) arr", "1.0 / (1+math.exp(-num)) # loss function, forcast => y_hat, real=>labels and y def", "is 0.001 maxCycle: loop times is 500 matrix rule: martix_A's columns equals matrix_B's", "a:float(a), cols[0:-1]))) labels.append(float(cols[-1])) f.close() return dataSet, labels def sigmoid(num): return 1.0 / (1+math.exp(-num))", "i = i.strip() cols = i.split(\"\\t\") dataSet.append(list(map(lambda a:float(a), cols[0:-1]))) labels.append(float(cols[-1])) f.close() return dataSet,", "this is A set() y_hat = sigmoidOnEle(z) error_martrix = y_hat - labelMat dz", "m,n = shape(matrix) arr = [] for i in range(m): rows = []", "(1.0 / m) * np.dot(dataSetMat.transpose(),error_martrix) weights = weights - alpha * dz db", "dataSet, labels = loadDataSet(filename) w,b= linearReg(dataSet, labels) print(w) print(b) # some test #test_data", "and y def loss(forcast, real): return -(real * math.log(forcast) + (1-real)*log(1-forcast)) def sigmoidOnEle(matrix):", "return mat(arr) \"\"\" alpha: learning rate is 0.001 maxCycle: loop times is 500", "np.sum(error_martrix) b = b - alpha * db return weights, b if __name__", "maxCycle: loop times is 500 matrix rule: martix_A's columns equals matrix_B's rows \"\"\"", "labelMat dz = (1.0 / m) * np.dot(dataSetMat.transpose(),error_martrix) weights = weights - alpha", "labelMat = mat(labels).transpose() alpha = 0.001 maxCycles = 1000 m,n = shape(dataSet) weights", "sigmoid(matrix[i,j]) rows.append(sig) arr.append(rows) return mat(arr) \"\"\" alpha: learning rate is 0.001 maxCycle: loop", "i.split(\"\\t\") dataSet.append(list(map(lambda a:float(a), cols[0:-1]))) labels.append(float(cols[-1])) f.close() return dataSet, labels def sigmoid(num): return 1.0", "equals matrix_B's rows \"\"\" def linearReg(dataSet, labels): dataSetMat = mat(dataSet) labelMat = mat(labels).transpose()", "np from numpy import * import math import sys def loadDataSet(filename): f 
=", "= [] labels = [] for i in lines: i = i.strip() cols", "= (1.0 / m) * np.sum(error_martrix) b = b - alpha * db", "= 0.001 maxCycles = 1000 m,n = shape(dataSet) weights = zeros((n,1)) b =", "f.readlines() dataSet = [] labels = [] for i in lines: i =", "[] labels = [] for i in lines: i = i.strip() cols =", "labels = [] for i in lines: i = i.strip() cols = i.split(\"\\t\")", "forcast => y_hat, real=>labels and y def loss(forcast, real): return -(real * math.log(forcast)", "y_hat = sigmoidOnEle(z) error_martrix = y_hat - labelMat dz = (1.0 / m)", "weights - alpha * dz db = (1.0 / m) * np.sum(error_martrix) b", "alpha: learning rate is 0.001 maxCycle: loop times is 500 matrix rule: martix_A's", "== '__main__': filename = sys.argv[1] dataSet, labels = loadDataSet(filename) w,b= linearReg(dataSet, labels) print(w)", "/ (1+math.exp(-num)) # loss function, forcast => y_hat, real=>labels and y def loss(forcast,", "some test #test_data = mat(dataSet[0:10]) #test_labels = mat(labels[0:10]) #y_hat = np.dot(test_data,w) + b", "columns equals matrix_B's rows \"\"\" def linearReg(dataSet, labels): dataSetMat = mat(dataSet) labelMat =", "# coding: utf-8 import numpy as np from numpy import * import math", "alpha = 0.001 maxCycles = 1000 m,n = shape(dataSet) weights = zeros((n,1)) b", "rows = [] for j in range(n): sig = sigmoid(matrix[i,j]) rows.append(sig) arr.append(rows) return", "dataSet = [] labels = [] for i in lines: i = i.strip()", "db = (1.0 / m) * np.sum(error_martrix) b = b - alpha *", "m) * np.dot(dataSetMat.transpose(),error_martrix) weights = weights - alpha * dz db = (1.0", "= 1000 m,n = shape(dataSet) weights = zeros((n,1)) b = 0 for i", "# this is Z set() z = np.dot(dataSetMat, weights)+b # this is A", "- alpha * db return weights, b if __name__ == '__main__': filename =", "= i.strip() cols = i.split(\"\\t\") dataSet.append(list(map(lambda a:float(a), cols[0:-1]))) labels.append(float(cols[-1])) f.close() return dataSet, labels", "500 matrix rule: martix_A's columns equals matrix_B's rows \"\"\" def linearReg(dataSet, labels): dataSetMat", "labels def sigmoid(num): return 1.0 / (1+math.exp(-num)) # loss function, forcast => y_hat,", "is 500 matrix rule: martix_A's columns equals matrix_B's rows \"\"\" def linearReg(dataSet, labels):", "labels.append(float(cols[-1])) f.close() return dataSet, labels def sigmoid(num): return 1.0 / (1+math.exp(-num)) # loss", "import sys def loadDataSet(filename): f = open(filename) lines = f.readlines() dataSet = []", "f.close() return dataSet, labels def sigmoid(num): return 1.0 / (1+math.exp(-num)) # loss function,", "linearReg(dataSet, labels): dataSetMat = mat(dataSet) labelMat = mat(labels).transpose() alpha = 0.001 maxCycles =", "error_martrix = y_hat - labelMat dz = (1.0 / m) * np.dot(dataSetMat.transpose(),error_martrix) weights", "in range(n): sig = sigmoid(matrix[i,j]) rows.append(sig) arr.append(rows) return mat(arr) \"\"\" alpha: learning rate", "zeros((n,1)) b = 0 for i in range(maxCycles): # this is Z set()", "= open(filename) lines = f.readlines() dataSet = [] labels = [] for i", "= [] for j in range(n): sig = sigmoid(matrix[i,j]) rows.append(sig) arr.append(rows) return mat(arr)", "filename = sys.argv[1] dataSet, labels = loadDataSet(filename) w,b= linearReg(dataSet, labels) print(w) print(b) #", "= loadDataSet(filename) w,b= linearReg(dataSet, labels) print(w) print(b) # some test #test_data = mat(dataSet[0:10])", "= np.dot(dataSetMat, weights)+b # this is A set() y_hat = sigmoidOnEle(z) error_martrix =", "= sigmoidOnEle(z) error_martrix 
= y_hat - labelMat dz = (1.0 / m) *", "linearReg(dataSet, labels) print(w) print(b) # some test #test_data = mat(dataSet[0:10]) #test_labels = mat(labels[0:10])", "i in range(m): rows = [] for j in range(n): sig = sigmoid(matrix[i,j])", "labels = loadDataSet(filename) w,b= linearReg(dataSet, labels) print(w) print(b) # some test #test_data =", "= i.split(\"\\t\") dataSet.append(list(map(lambda a:float(a), cols[0:-1]))) labels.append(float(cols[-1])) f.close() return dataSet, labels def sigmoid(num): return", "b - alpha * db return weights, b if __name__ == '__main__': filename", "[] for j in range(n): sig = sigmoid(matrix[i,j]) rows.append(sig) arr.append(rows) return mat(arr) \"\"\"", "= shape(dataSet) weights = zeros((n,1)) b = 0 for i in range(maxCycles): #", "i.strip() cols = i.split(\"\\t\") dataSet.append(list(map(lambda a:float(a), cols[0:-1]))) labels.append(float(cols[-1])) f.close() return dataSet, labels def", "w,b= linearReg(dataSet, labels) print(w) print(b) # some test #test_data = mat(dataSet[0:10]) #test_labels =", "coding: utf-8 import numpy as np from numpy import * import math import", "loadDataSet(filename): f = open(filename) lines = f.readlines() dataSet = [] labels = []", "'__main__': filename = sys.argv[1] dataSet, labels = loadDataSet(filename) w,b= linearReg(dataSet, labels) print(w) print(b)", "return 1.0 / (1+math.exp(-num)) # loss function, forcast => y_hat, real=>labels and y", "mat(labels).transpose() alpha = 0.001 maxCycles = 1000 m,n = shape(dataSet) weights = zeros((n,1))", "shape(dataSet) weights = zeros((n,1)) b = 0 for i in range(maxCycles): # this", "b = b - alpha * db return weights, b if __name__ ==", "(1-real)*log(1-forcast)) def sigmoidOnEle(matrix): m,n = shape(matrix) arr = [] for i in range(m):", "numpy import * import math import sys def loadDataSet(filename): f = open(filename) lines", "import math import sys def loadDataSet(filename): f = open(filename) lines = f.readlines() dataSet", "labels): dataSetMat = mat(dataSet) labelMat = mat(labels).transpose() alpha = 0.001 maxCycles = 1000", "= weights - alpha * dz db = (1.0 / m) * np.sum(error_martrix)", "db return weights, b if __name__ == '__main__': filename = sys.argv[1] dataSet, labels", "- labelMat dz = (1.0 / m) * np.dot(dataSetMat.transpose(),error_martrix) weights = weights -", "return weights, b if __name__ == '__main__': filename = sys.argv[1] dataSet, labels =", "= mat(labels).transpose() alpha = 0.001 maxCycles = 1000 m,n = shape(dataSet) weights =", "\"\"\" alpha: learning rate is 0.001 maxCycle: loop times is 500 matrix rule:", "from numpy import * import math import sys def loadDataSet(filename): f = open(filename)", "in range(maxCycles): # this is Z set() z = np.dot(dataSetMat, weights)+b # this", "loss(forcast, real): return -(real * math.log(forcast) + (1-real)*log(1-forcast)) def sigmoidOnEle(matrix): m,n = shape(matrix)", "b = 0 for i in range(maxCycles): # this is Z set() z", "m) * np.sum(error_martrix) b = b - alpha * db return weights, b", "= [] for i in lines: i = i.strip() cols = i.split(\"\\t\") dataSet.append(list(map(lambda", "this is Z set() z = np.dot(dataSetMat, weights)+b # this is A set()", "= y_hat - labelMat dz = (1.0 / m) * np.dot(dataSetMat.transpose(),error_martrix) weights =", "loop times is 500 matrix rule: martix_A's columns equals matrix_B's rows \"\"\" def", "arr = [] for i in range(m): rows = [] for j in", "cols = i.split(\"\\t\") dataSet.append(list(map(lambda a:float(a), cols[0:-1]))) labels.append(float(cols[-1])) f.close() return dataSet, labels 
def sigmoid(num):", "sys def loadDataSet(filename): f = open(filename) lines = f.readlines() dataSet = [] labels", "utf-8 import numpy as np from numpy import * import math import sys", "# loss function, forcast => y_hat, real=>labels and y def loss(forcast, real): return", "+ (1-real)*log(1-forcast)) def sigmoidOnEle(matrix): m,n = shape(matrix) arr = [] for i in", "= 0 for i in range(maxCycles): # this is Z set() z =", "math.log(forcast) + (1-real)*log(1-forcast)) def sigmoidOnEle(matrix): m,n = shape(matrix) arr = [] for i", "(1.0 / m) * np.sum(error_martrix) b = b - alpha * db return", "<filename>linearRegression/linearReg.py # coding: utf-8 import numpy as np from numpy import * import", "range(m): rows = [] for j in range(n): sig = sigmoid(matrix[i,j]) rows.append(sig) arr.append(rows)", "z = np.dot(dataSetMat, weights)+b # this is A set() y_hat = sigmoidOnEle(z) error_martrix", "dataSet, labels def sigmoid(num): return 1.0 / (1+math.exp(-num)) # loss function, forcast =>", "def sigmoid(num): return 1.0 / (1+math.exp(-num)) # loss function, forcast => y_hat, real=>labels", "np.dot(dataSetMat, weights)+b # this is A set() y_hat = sigmoidOnEle(z) error_martrix = y_hat", "numpy as np from numpy import * import math import sys def loadDataSet(filename):", "in lines: i = i.strip() cols = i.split(\"\\t\") dataSet.append(list(map(lambda a:float(a), cols[0:-1]))) labels.append(float(cols[-1])) f.close()", "weights = zeros((n,1)) b = 0 for i in range(maxCycles): # this is", "range(maxCycles): # this is Z set() z = np.dot(dataSetMat, weights)+b # this is", "lines: i = i.strip() cols = i.split(\"\\t\") dataSet.append(list(map(lambda a:float(a), cols[0:-1]))) labels.append(float(cols[-1])) f.close() return", "loadDataSet(filename) w,b= linearReg(dataSet, labels) print(w) print(b) # some test #test_data = mat(dataSet[0:10]) #test_labels", "sig = sigmoid(matrix[i,j]) rows.append(sig) arr.append(rows) return mat(arr) \"\"\" alpha: learning rate is 0.001", "0.001 maxCycles = 1000 m,n = shape(dataSet) weights = zeros((n,1)) b = 0", "0.001 maxCycle: loop times is 500 matrix rule: martix_A's columns equals matrix_B's rows", "* import math import sys def loadDataSet(filename): f = open(filename) lines = f.readlines()", "return -(real * math.log(forcast) + (1-real)*log(1-forcast)) def sigmoidOnEle(matrix): m,n = shape(matrix) arr =", "dataSet.append(list(map(lambda a:float(a), cols[0:-1]))) labels.append(float(cols[-1])) f.close() return dataSet, labels def sigmoid(num): return 1.0 /", "for i in range(m): rows = [] for j in range(n): sig =", "mat(dataSet) labelMat = mat(labels).transpose() alpha = 0.001 maxCycles = 1000 m,n = shape(dataSet)", "A set() y_hat = sigmoidOnEle(z) error_martrix = y_hat - labelMat dz = (1.0", "for i in lines: i = i.strip() cols = i.split(\"\\t\") dataSet.append(list(map(lambda a:float(a), cols[0:-1])))", "=> y_hat, real=>labels and y def loss(forcast, real): return -(real * math.log(forcast) +", "* db return weights, b if __name__ == '__main__': filename = sys.argv[1] dataSet,", "for j in range(n): sig = sigmoid(matrix[i,j]) rows.append(sig) arr.append(rows) return mat(arr) \"\"\" alpha:", "alpha * db return weights, b if __name__ == '__main__': filename = sys.argv[1]", "sigmoid(num): return 1.0 / (1+math.exp(-num)) # loss function, forcast => y_hat, real=>labels and", "sigmoidOnEle(matrix): m,n = shape(matrix) arr = [] for i in range(m): rows =", "y_hat - labelMat dz = (1.0 / m) * np.dot(dataSetMat.transpose(),error_martrix) weights = weights", 
"np.dot(dataSetMat.transpose(),error_martrix) weights = weights - alpha * dz db = (1.0 / m)", "/ m) * np.sum(error_martrix) b = b - alpha * db return weights,", "def loadDataSet(filename): f = open(filename) lines = f.readlines() dataSet = [] labels =", "i in lines: i = i.strip() cols = i.split(\"\\t\") dataSet.append(list(map(lambda a:float(a), cols[0:-1]))) labels.append(float(cols[-1]))", "cols[0:-1]))) labels.append(float(cols[-1])) f.close() return dataSet, labels def sigmoid(num): return 1.0 / (1+math.exp(-num)) #", "= [] for i in range(m): rows = [] for j in range(n):", "* np.sum(error_martrix) b = b - alpha * db return weights, b if", "i in range(maxCycles): # this is Z set() z = np.dot(dataSetMat, weights)+b #", "rate is 0.001 maxCycle: loop times is 500 matrix rule: martix_A's columns equals", "import * import math import sys def loadDataSet(filename): f = open(filename) lines =", "b if __name__ == '__main__': filename = sys.argv[1] dataSet, labels = loadDataSet(filename) w,b=", "[] for i in lines: i = i.strip() cols = i.split(\"\\t\") dataSet.append(list(map(lambda a:float(a),", "import numpy as np from numpy import * import math import sys def", "1000 m,n = shape(dataSet) weights = zeros((n,1)) b = 0 for i in", "weights = weights - alpha * dz db = (1.0 / m) *", "mat(arr) \"\"\" alpha: learning rate is 0.001 maxCycle: loop times is 500 matrix", "loss function, forcast => y_hat, real=>labels and y def loss(forcast, real): return -(real", "def linearReg(dataSet, labels): dataSetMat = mat(dataSet) labelMat = mat(labels).transpose() alpha = 0.001 maxCycles", "def sigmoidOnEle(matrix): m,n = shape(matrix) arr = [] for i in range(m): rows", "(1+math.exp(-num)) # loss function, forcast => y_hat, real=>labels and y def loss(forcast, real):", "matrix rule: martix_A's columns equals matrix_B's rows \"\"\" def linearReg(dataSet, labels): dataSetMat =", "/ m) * np.dot(dataSetMat.transpose(),error_martrix) weights = weights - alpha * dz db =", "matrix_B's rows \"\"\" def linearReg(dataSet, labels): dataSetMat = mat(dataSet) labelMat = mat(labels).transpose() alpha", "function, forcast => y_hat, real=>labels and y def loss(forcast, real): return -(real *", "alpha * dz db = (1.0 / m) * np.sum(error_martrix) b = b", "range(n): sig = sigmoid(matrix[i,j]) rows.append(sig) arr.append(rows) return mat(arr) \"\"\" alpha: learning rate is", "= b - alpha * db return weights, b if __name__ == '__main__':", "if __name__ == '__main__': filename = sys.argv[1] dataSet, labels = loadDataSet(filename) w,b= linearReg(dataSet,", "= sys.argv[1] dataSet, labels = loadDataSet(filename) w,b= linearReg(dataSet, labels) print(w) print(b) # some", "sys.argv[1] dataSet, labels = loadDataSet(filename) w,b= linearReg(dataSet, labels) print(w) print(b) # some test", "times is 500 matrix rule: martix_A's columns equals matrix_B's rows \"\"\" def linearReg(dataSet,", "* dz db = (1.0 / m) * np.sum(error_martrix) b = b -", "dz db = (1.0 / m) * np.sum(error_martrix) b = b - alpha", "return dataSet, labels def sigmoid(num): return 1.0 / (1+math.exp(-num)) # loss function, forcast", "= (1.0 / m) * np.dot(dataSetMat.transpose(),error_martrix) weights = weights - alpha * dz", "print(w) print(b) # some test #test_data = mat(dataSet[0:10]) #test_labels = mat(labels[0:10]) #y_hat =", "= sigmoid(matrix[i,j]) rows.append(sig) arr.append(rows) return mat(arr) \"\"\" alpha: learning rate is 0.001 maxCycle:", "= f.readlines() dataSet = [] labels = [] for i in lines: i", "lines = f.readlines() dataSet = [] labels = [] for i in lines:", 
"weights, b if __name__ == '__main__': filename = sys.argv[1] dataSet, labels = loadDataSet(filename)", "= zeros((n,1)) b = 0 for i in range(maxCycles): # this is Z", "\"\"\" def linearReg(dataSet, labels): dataSetMat = mat(dataSet) labelMat = mat(labels).transpose() alpha = 0.001", "0 for i in range(maxCycles): # this is Z set() z = np.dot(dataSetMat,", "set() y_hat = sigmoidOnEle(z) error_martrix = y_hat - labelMat dz = (1.0 /", "y_hat, real=>labels and y def loss(forcast, real): return -(real * math.log(forcast) + (1-real)*log(1-forcast))", "rule: martix_A's columns equals matrix_B's rows \"\"\" def linearReg(dataSet, labels): dataSetMat = mat(dataSet)", "rows \"\"\" def linearReg(dataSet, labels): dataSetMat = mat(dataSet) labelMat = mat(labels).transpose() alpha =", "dz = (1.0 / m) * np.dot(dataSetMat.transpose(),error_martrix) weights = weights - alpha *", "# some test #test_data = mat(dataSet[0:10]) #test_labels = mat(labels[0:10]) #y_hat = np.dot(test_data,w) +", "real=>labels and y def loss(forcast, real): return -(real * math.log(forcast) + (1-real)*log(1-forcast)) def", "is Z set() z = np.dot(dataSetMat, weights)+b # this is A set() y_hat", "maxCycles = 1000 m,n = shape(dataSet) weights = zeros((n,1)) b = 0 for", "is A set() y_hat = sigmoidOnEle(z) error_martrix = y_hat - labelMat dz =", "open(filename) lines = f.readlines() dataSet = [] labels = [] for i in", "shape(matrix) arr = [] for i in range(m): rows = [] for j", "f = open(filename) lines = f.readlines() dataSet = [] labels = [] for", "__name__ == '__main__': filename = sys.argv[1] dataSet, labels = loadDataSet(filename) w,b= linearReg(dataSet, labels)", "rows.append(sig) arr.append(rows) return mat(arr) \"\"\" alpha: learning rate is 0.001 maxCycle: loop times", "learning rate is 0.001 maxCycle: loop times is 500 matrix rule: martix_A's columns", "m,n = shape(dataSet) weights = zeros((n,1)) b = 0 for i in range(maxCycles):", "def loss(forcast, real): return -(real * math.log(forcast) + (1-real)*log(1-forcast)) def sigmoidOnEle(matrix): m,n =", "sigmoidOnEle(z) error_martrix = y_hat - labelMat dz = (1.0 / m) * np.dot(dataSetMat.transpose(),error_martrix)", "labels) print(w) print(b) # some test #test_data = mat(dataSet[0:10]) #test_labels = mat(labels[0:10]) #y_hat", "as np from numpy import * import math import sys def loadDataSet(filename): f", "print(b) # some test #test_data = mat(dataSet[0:10]) #test_labels = mat(labels[0:10]) #y_hat = np.dot(test_data,w)", "Z set() z = np.dot(dataSetMat, weights)+b # this is A set() y_hat =", "* math.log(forcast) + (1-real)*log(1-forcast)) def sigmoidOnEle(matrix): m,n = shape(matrix) arr = [] for" ]
[ "is a list of strings for which the sum operation is skipped. Returns", "list skip_keys. Typically, the values of X are NumPy arrays (histograms) that are", "JSON text files if (len(py_keys) > 0): for key in py_keys: file_name =", "np_keys: np_keys.remove(rad) np_keys.insert(0, rad) # --- np_all_1d = True for key in np_keys:", "to X for any value in Y, excluding keys that are in the", "list of strings skip_keys is a list of strings for which the append", "strings skip_keys is a list of strings for which the sum operation is", "C (e.g. to implement averaging operation). Parameters ---------- X : dict X is", "np.ndarray): np_keys.append(key) else: py_keys.append(key) # --- np_keys.sort() py_keys.sort() # --- (1) save NumPy", "an ASCII output routine is provided. \"\"\" import copy import numpy as np", "--- dump data util.savetxtHeader(path + '/' + key + '.dat', '# ' +", "# --- dump data util.savetxtHeader(path + '/' + key + '.dat', '# '", "in the list skip_keys. Typically, the values of X, Y are NumPy arrays", "values of X are NumPy arrays (histograms) that are rescaled after summation using", "the list skip_keys. Typically, the values of X are NumPy arrays (histograms) that", "arrays (histograms) that are rescaled after summation using a scalar C (e.g. to", "def write_dict(dic, path, level=0): \"\"\"Write a dictionary containing NumPy arrays or other Python", "directly and does not return anything. \"\"\" assert isinstance(X, dict) for key in", "appended. Parameters ---------- X : dict X is a dictionary with string keys", "data structures. path : string Path where the dictionary and its data shall", ": string Path where the dictionary and its data shall be written to.", "return anything. \"\"\" np_keys = [] py_keys = [] for key in list(dic.keys()):", "if rad in np_keys: np_keys.remove(rad) np_keys.insert(0, rad) # --- np_all_1d = True for", "for key in py_keys: file_name = path + '/' + key + '.json'", "enumerate(np_keys): arr[:, idx] = (dic[key])[:] # --- build header if rad in np_keys:", "key in py_keys: file_name = path + '/' + key + '.json' util.md(file_name)", "for which the append operation is skipped. Returns ------- None The function scale_values", "+ key _level = level + 1 write_dict(val, _path, _level) else: if isinstance(val,", "size compatible with the contents of X. skip_keys : list of strings skip_keys", "text files if (len(py_keys) > 0): for key in py_keys: file_name = path", "skip_keys. Typically, the values of X are NumPy arrays (histograms) that are rescaled", "NumPy arrays. C : scalar, NumPy array C is a multiplier, either a", "contents of X. skip_keys : list of strings skip_keys is a list of", "skip_keys. Typically, the values of X, Y are NumPy arrays (e.g. particle numbers)", "to JSON text files if (len(py_keys) > 0): for key in py_keys: file_name", "py_keys.append(key) # --- np_keys.sort() py_keys.sort() # --- (1) save NumPy arrays to text", "import copy import numpy as np import json from . import util def", "purposes and does not have any practical relevance. Returns ------- None The function", "guarantee successful operation. Parameters ---------- dic : dictionary A dictionary containing NumPy arrays", "relevance. Returns ------- None The function write_dict does not return anything. \"\"\" np_keys", "CAlculation of DIStance HIstograms # # Copyright (c) <NAME>, <NAME> # See the", "sum_values operates on X directly and does not return anything. 
\"\"\" assert isinstance(X,", "applied to any value in X, excluding keys that are in the list", "case the dictionary contains other dictionaries, the function is called recursively. The keys", "for idx, key in enumerate(np_keys): arr[:, idx] = (dic[key])[:] # --- build header", "rad) # --- np_all_1d = True for key in np_keys: val = dic[key]", "The function scale_values operates on X directly and does not return anything. \"\"\"", "arr = np.zeros([n_row, n_col]) for idx, key in enumerate(np_keys): arr[:, idx] = (dic[key])[:]", "of strings for which the sum operation is skipped. Returns ------- None The", "# --- np_keys.sort() py_keys.sort() # --- (1) save NumPy arrays to text files", "NumPy arrays. skip_keys : list of strings skip_keys is a list of strings", "' + key, arr) # --- (2) for robustness, save any other Python", "+ '.dat', '# ' + key, arr) # --- (2) for robustness, save", ": scalar, NumPy array C is a multiplier, either a scalar of a", "'.dat', '# ' + key, arr) # --- (2) for robustness, save any", "Cadishi --- CAlculation of DIStance HIstograms # # Copyright (c) <NAME>, <NAME> #", "of strings skip_keys is a list of strings for which the append operation", "continue X[key] *= C def append_values(X, Y, skip_keys=['radii']): \"\"\"Implement X.append(Y) where X and", "scalar, NumPy array C is a multiplier, either a scalar of a NumPy", "a multiplier, either a scalar of a NumPy array of size compatible with", "tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # Cadishi --- CAlculation of DIStance HIstograms", "that contain NumPy data types. The operation is applied to X for any", "any practical relevance. Returns ------- None The function write_dict does not return anything.", "that contains supported data types. The operation is applied to any value in", "= [] for key in list(dic.keys()): val = dic[key] if isinstance(val, dict): _path", "+ '.dat', header, arr) else: # --- we save arrays with more than", "Copyright (c) <NAME>, <NAME> # See the file AUTHORS.rst for the full list", "a list of strings for which the sum operation is skipped. Returns -------", "_level) else: if isinstance(val, np.ndarray): np_keys.append(key) else: py_keys.append(key) # --- np_keys.sort() py_keys.sort() #", "isinstance(val, np.ndarray): np_keys.append(key) else: py_keys.append(key) # --- np_keys.sort() py_keys.sort() # --- (1) save", "0): if np_all_1d: # --- concatenate arrays into a 2d array val =", "isinstance(val, dict): _path = path + '/' + key _level = level +", "--- (1) save NumPy arrays to text files rad = 'radii' if rad", "dict Y is a dictionary with string keys that contains NumPy arrays. skip_keys", "of a NumPy array of size compatible with the contents of X. skip_keys", "a list of strings for which the append operation is skipped. Returns -------", "np_all_1d = False break if (len(np_keys) > 0): if np_all_1d: # --- concatenate", "supported data types. The operation is applied to any value in X, excluding", "see the file LICENSE.txt. \"\"\"Various NumPy- and dictionary-related utilities. Implements add, append, and", "arrays) stored in dictionaries. In addition, an ASCII output routine is provided. \"\"\"", "X: X[key] = copy.deepcopy(Y[key]) else: X[key] = np.append(X[key], Y[key]) def write_dict(dic, path, level=0):", "NumPy- and dictionary-related utilities. 
Implements add, append, and scale operations for numerical data", "X for any value in Y, excluding keys that are in the list", "+ '/' + key + '.json' util.md(file_name) with open(file_name, \"w\") as fp: json.dump(dic[key],", "is a Python dictionary that contains supported data types. The operation is applied", "arrays. Y : dict Y is a dictionary with string keys that contains", "dictionary-related utilities. Implements add, append, and scale operations for numerical data (ie. NumPy", "val = dic[key] if (len(val.shape) > 1): np_all_1d = False break if (len(np_keys)", "Typically, the values of X, Y are NumPy arrays (e.g. histograms) that are", "for any value in Y, excluding keys that are in the list skip_keys.", "on X directly and does not return anything. \"\"\" assert isinstance(X, dict) assert", "as np import json from . import util def sum_values(X, Y, skip_keys=['radii', 'frame']):", "sum operation is skipped. Returns ------- None The function scale_values operates on X", "key in skip_keys: continue X[key] *= C def append_values(X, Y, skip_keys=['radii']): \"\"\"Implement X.append(Y)", "function scale_values operates on X directly and does not return anything. \"\"\" assert", "break if (len(np_keys) > 0): if np_all_1d: # --- concatenate arrays into a", "= [] py_keys = [] for key in list(dic.keys()): val = dic[key] if", "in list(dic.keys()): val = dic[key] if isinstance(val, dict): _path = path + '/'", "in np_keys: arr = dic[key] # --- dump data util.savetxtHeader(path + '/' +", ": int, optional Level in the nested-dictionary hierarchy during recursive operation. This parameter", "X are NumPy arrays (histograms) that are rescaled after summation using a scalar", "key in np_keys: header = header + ' ' + key # ---", "(dic[key])[:] # --- build header if rad in np_keys: np_keys.remove(rad) header = '#'", "shall be written to. level : int, optional Level in the nested-dictionary hierarchy", "Path where the dictionary and its data shall be written to. level :", "+ key, arr) # --- (2) for robustness, save any other Python data", "'frame']): \"\"\"Implement X += Y where X and Y are Python dictionaries (with", "Y : dict Y is a dictionary with string keys that contains NumPy", "in the nested-dictionary hierarchy during recursive operation. This parameter was added for debugging", "[] for key in list(dic.keys()): val = dic[key] if isinstance(val, dict): _path =", "rad in np_keys: np_keys.remove(rad) np_keys.insert(0, rad) # --- np_all_1d = True for key", "# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*- # vim: tabstop=4", "or other Python data structures to text files. In case the dictionary contains", "np_keys: arr = dic[key] # --- dump data util.savetxtHeader(path + '/' + key", "\"\"\"Implement X += Y where X and Y are Python dictionaries (with string", "is applied to X for any value in Y, excluding keys that are", "than one dimension separately for key in np_keys: arr = dic[key] # ---", "multiplier, either a scalar of a NumPy array of size compatible with the", "else: py_keys.append(key) # --- np_keys.sort() py_keys.sort() # --- (1) save NumPy arrays to", "if isinstance(val, np.ndarray): np_keys.append(key) else: py_keys.append(key) # --- np_keys.sort() py_keys.sort() # --- (1)", "X, Y are NumPy arrays (e.g. particle numbers) that are appended. Parameters ----------", "a Python dictionary that contains supported data types. 
The operation is applied to", "else: # --- we save arrays with more than one dimension separately for", "+ key # --- dump data util.savetxtHeader(path + '.dat', header, arr) else: #", "\"\"\" import copy import numpy as np import json from . import util", "have any practical relevance. Returns ------- None The function write_dict does not return", "in np_keys: np_keys.remove(rad) header = '#' for key in np_keys: header = header", "+= Y where X and Y are Python dictionaries (with string keys) that", "# See the file AUTHORS.rst for the full list of contributors. # #", "*= C def append_values(X, Y, skip_keys=['radii']): \"\"\"Implement X.append(Y) where X and Y are", "is a dictionary with string keys that contains NumPy arrays. Y : dict", "if key not in X: X[key] = copy.deepcopy(Y[key]) else: X[key] += Y[key] def", "is a multiplier, either a scalar of a NumPy array of size compatible", "or other Python data structures. path : string Path where the dictionary and", "containing NumPy arrays or other Python data structures. path : string Path where", "+ key + '.dat', '# ' + key, arr) # --- (2) for", "Y are Python dictionaries (with string keys) that contain summable data types. The", "header, arr) else: # --- we save arrays with more than one dimension", "utilities. Implements add, append, and scale operations for numerical data (ie. NumPy arrays)", "dimension separately for key in np_keys: arr = dic[key] # --- dump data", "--- (2) for robustness, save any other Python data to JSON text files", "in list(Y.keys()): if key in skip_keys: continue if key not in X: X[key]", "X and Y are Python dictionaries that contain NumPy data types. The operation", "assert isinstance(Y, dict) for key in list(Y.keys()): if key in skip_keys: continue if", "= True for key in np_keys: val = dic[key] if (len(val.shape) > 1):", "with string keys that contains NumPy arrays. Y : dict Y is a", "the values of X, Y are NumPy arrays (e.g. histograms) that are summed.", "in dictionaries. In addition, an ASCII output routine is provided. \"\"\" import copy", "HIstograms # # Copyright (c) <NAME>, <NAME> # See the file AUTHORS.rst for", "add, append, and scale operations for numerical data (ie. NumPy arrays) stored in", "operations for numerical data (ie. NumPy arrays) stored in dictionaries. In addition, an", "dict): _path = path + '/' + key _level = level + 1", "util.savetxtHeader(path + '/' + key + '.dat', '# ' + key, arr) #", "'radii' if rad in np_keys: np_keys.remove(rad) np_keys.insert(0, rad) # --- np_all_1d = True", "C def append_values(X, Y, skip_keys=['radii']): \"\"\"Implement X.append(Y) where X and Y are Python", "strings for which the sum operation is skipped. Returns ------- None The function", "sum_values(X, Y, skip_keys=['radii', 'frame']): \"\"\"Implement X += Y where X and Y are", "else: X[key] = np.append(X[key], Y[key]) def write_dict(dic, path, level=0): \"\"\"Write a dictionary containing", "Y are NumPy arrays (e.g. histograms) that are summed. Parameters ---------- X :", "is applied to any value in X, excluding keys that are in the", "skipped. 
Returns ------- None The function sum_values operates on X directly and does", "isinstance(X, dict) assert isinstance(Y, dict) for key in list(Y.keys()): if key in skip_keys:", "np.zeros([n_row, n_col]) for idx, key in enumerate(np_keys): arr[:, idx] = (dic[key])[:] # ---", "dict) for key in list(Y.keys()): if key in skip_keys: continue if key not", "\"\"\"Implement X.append(Y) where X and Y are Python dictionaries that contain NumPy data", "key in list(X.keys()): if key in skip_keys: continue X[key] *= C def append_values(X,", "other Python data structures to text files. In case the dictionary contains other", "with more than one dimension separately for key in np_keys: arr = dic[key]", "1): np_all_1d = False break if (len(np_keys) > 0): if np_all_1d: # ---", "vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # Cadishi --- CAlculation of DIStance", "of X, Y are NumPy arrays (e.g. particle numbers) that are appended. Parameters", "one dimension separately for key in np_keys: arr = dic[key] # --- dump", "particle numbers) that are appended. Parameters ---------- X : dict X is a", "py_keys.sort() # --- (1) save NumPy arrays to text files rad = 'radii'", "'/' + key + '.dat', '# ' + key, arr) # --- (2)", "val = dic[key] if isinstance(val, dict): _path = path + '/' + key", "+= Y[key] def scale_values(X, C, skip_keys=['radii', 'frame']): \"\"\"Implement X = X times C", "other Python data to JSON text files if (len(py_keys) > 0): for key", "path : string Path where the dictionary and its data shall be written", "--- np_keys.sort() py_keys.sort() # --- (1) save NumPy arrays to text files rad", "with the contents of X. skip_keys : list of strings skip_keys is a", "np_keys.insert(0, rad) # --- np_all_1d = True for key in np_keys: val =", "# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # Cadishi --- CAlculation of", "# Copyright (c) <NAME>, <NAME> # See the file AUTHORS.rst for the full", "data util.savetxtHeader(path + '.dat', header, arr) else: # --- we save arrays with", "X[key] += Y[key] def scale_values(X, C, skip_keys=['radii', 'frame']): \"\"\"Implement X = X times", "None The function scale_values operates on X directly and does not return anything.", "dictionaries. In addition, an ASCII output routine is provided. \"\"\" import copy import", "which the sum operation is skipped. Returns ------- None The function scale_values operates", "continue if key not in X: X[key] = copy.deepcopy(Y[key]) else: X[key] = np.append(X[key],", "coding: utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # Cadishi", "level : int, optional Level in the nested-dictionary hierarchy during recursive operation. This", "write_dict(val, _path, _level) else: if isinstance(val, np.ndarray): np_keys.append(key) else: py_keys.append(key) # --- np_keys.sort()", "for debugging purposes and does not have any practical relevance. Returns ------- None", "arrays (e.g. particle numbers) that are appended. Parameters ---------- X : dict X", "= (dic[key])[:] # --- build header if rad in np_keys: np_keys.remove(rad) header =", "skip_keys. Typically, the values of X, Y are NumPy arrays (e.g. histograms) that", "save NumPy arrays to text files rad = 'radii' if rad in np_keys:", "operation. Parameters ---------- dic : dictionary A dictionary containing NumPy arrays or other", "is a dictionary with string keys that contains NumPy arrays. skip_keys : list", "where X and Y are Python dictionaries that contain NumPy data types. 
The", ": dict Y is a dictionary with string keys that contains NumPy arrays.", "are Python dictionaries that contain NumPy data types. The operation is applied to", "<filename>cadishi/dict_util.py # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*- # vim:", "utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # Cadishi ---", "return anything. \"\"\" assert isinstance(X, dict) for key in list(X.keys()): if key in", "write_dict(dic, path, level=0): \"\"\"Write a dictionary containing NumPy arrays or other Python data", "= header + ' ' + key # --- dump data util.savetxtHeader(path +", "data structures to text files. In case the dictionary contains other dictionaries, the", "skip_keys=['radii']): \"\"\"Implement X.append(Y) where X and Y are Python dictionaries that contain NumPy", "operation. This parameter was added for debugging purposes and does not have any", "1 write_dict(val, _path, _level) else: if isinstance(val, np.ndarray): np_keys.append(key) else: py_keys.append(key) # ---", "X, excluding keys that are in the list skip_keys. Typically, the values of", "X[key] = copy.deepcopy(Y[key]) else: X[key] = np.append(X[key], Y[key]) def write_dict(dic, path, level=0): \"\"\"Write", "in skip_keys: continue X[key] *= C def append_values(X, Y, skip_keys=['radii']): \"\"\"Implement X.append(Y) where", "sum operation is skipped. Returns ------- None The function sum_values operates on X", "X and Y are Python dictionaries (with string keys) that contain summable data", "Y is a dictionary with string keys that contains NumPy arrays. skip_keys :", "key in list(Y.keys()): if key in skip_keys: continue if key not in X:", "excluding keys that are in the list skip_keys. Typically, the values of X", "a 2d array val = dic[np_keys[0]] n_row = val.shape[0] n_col = len(np_keys) arr", "util def sum_values(X, Y, skip_keys=['radii', 'frame']): \"\"\"Implement X += Y where X and", "tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8", "The operation is applied to X for any value in Y, excluding keys", "NumPy array of size compatible with the contents of X. skip_keys : list", "arrays (e.g. histograms) that are summed. Parameters ---------- X : dict X is", "into a 2d array val = dic[np_keys[0]] n_row = val.shape[0] n_col = len(np_keys)", "X = X times C where X is a Python dictionary that contains", "after summation using a scalar C (e.g. to implement averaging operation). Parameters ----------", "<NAME> # See the file AUTHORS.rst for the full list of contributors. #", "for key in list(X.keys()): if key in skip_keys: continue X[key] *= C def", "\"\"\"Various NumPy- and dictionary-related utilities. Implements add, append, and scale operations for numerical", "= '#' for key in np_keys: header = header + ' ' +", "Python data structures. path : string Path where the dictionary and its data", "A dictionary containing NumPy arrays or other Python data structures. path : string", "containing NumPy arrays or other Python data structures to text files. In case", "if (len(val.shape) > 1): np_all_1d = False break if (len(np_keys) > 0): if", "X. skip_keys : list of strings skip_keys is a list of strings for", "_level = level + 1 write_dict(val, _path, _level) else: if isinstance(val, np.ndarray): np_keys.append(key)", "See the file AUTHORS.rst for the full list of contributors. 
# # Released", "the values of X are NumPy arrays (histograms) that are rescaled after summation", "a dictionary with string keys that contains NumPy arrays. skip_keys : list of", "<NAME>, <NAME> # See the file AUTHORS.rst for the full list of contributors.", "py_keys = [] for key in list(dic.keys()): val = dic[key] if isinstance(val, dict):", "= path + '/' + key + '.json' util.md(file_name) with open(file_name, \"w\") as", "and scale operations for numerical data (ie. NumPy arrays) stored in dictionaries. In", "the list skip_keys. Typically, the values of X, Y are NumPy arrays (e.g.", "Python dictionary that contains supported data types. The operation is applied to any", "numbers) that are appended. Parameters ---------- X : dict X is a dictionary", "---------- dic : dictionary A dictionary containing NumPy arrays or other Python data", "and does not return anything. \"\"\" assert isinstance(X, dict) for key in list(X.keys()):", "that contains NumPy arrays. Y : dict Y is a dictionary with string", "--- concatenate arrays into a 2d array val = dic[np_keys[0]] n_row = val.shape[0]", "rad in np_keys: np_keys.remove(rad) header = '#' for key in np_keys: header =", "the full list of contributors. # # Released under the MIT License, see", "The function write_dict does not return anything. \"\"\" np_keys = [] py_keys =", "# # Cadishi --- CAlculation of DIStance HIstograms # # Copyright (c) <NAME>,", "compatible with the contents of X. skip_keys : list of strings skip_keys is", "header = '#' for key in np_keys: header = header + ' '", "contains NumPy arrays. skip_keys : list of strings skip_keys is a list of", "to. level : int, optional Level in the nested-dictionary hierarchy during recursive operation.", "array C is a multiplier, either a scalar of a NumPy array of", "is skipped. Returns ------- None The function scale_values operates on X directly and", "data shall be written to. level : int, optional Level in the nested-dictionary", "files if (len(py_keys) > 0): for key in py_keys: file_name = path +", "any value in Y, excluding keys that are in the list skip_keys. Typically,", "X : dict X is a dictionary with string keys that contains NumPy", "operates on X directly and does not return anything. \"\"\" assert isinstance(X, dict)", "def scale_values(X, C, skip_keys=['radii', 'frame']): \"\"\"Implement X = X times C where X", "dic[np_keys[0]] n_row = val.shape[0] n_col = len(np_keys) arr = np.zeros([n_row, n_col]) for idx,", "in enumerate(np_keys): arr[:, idx] = (dic[key])[:] # --- build header if rad in", "X directly and does not return anything. \"\"\" assert isinstance(X, dict) for key", "other dictionaries, the function is called recursively. The keys should be strings to", "to implement averaging operation). Parameters ---------- X : dict X is a dictionary", "X is a Python dictionary that contains supported data types. The operation is", ": dict X is a dictionary with string keys that contains NumPy arrays.", "Y where X and Y are Python dictionaries (with string keys) that contain", "0): for key in py_keys: file_name = path + '/' + key +", "list of contributors. # # Released under the MIT License, see the file", "scalar C (e.g. to implement averaging operation). 
Parameters ---------- X : dict X", "= False break if (len(np_keys) > 0): if np_all_1d: # --- concatenate arrays", "isinstance(X, dict) for key in list(X.keys()): if key in skip_keys: continue X[key] *=", "= val.shape[0] n_col = len(np_keys) arr = np.zeros([n_row, n_col]) for idx, key in", "path + '/' + key + '.json' util.md(file_name) with open(file_name, \"w\") as fp:", "with string keys that contains NumPy arrays. C : scalar, NumPy array C", "scale operations for numerical data (ie. NumPy arrays) stored in dictionaries. In addition,", "Y, excluding keys that are in the list skip_keys. Typically, the values of", "skip_keys is a list of strings for which the append operation is skipped.", "_path, _level) else: if isinstance(val, np.ndarray): np_keys.append(key) else: py_keys.append(key) # --- np_keys.sort() py_keys.sort()", "if np_all_1d: # --- concatenate arrays into a 2d array val = dic[np_keys[0]]", "' + key # --- dump data util.savetxtHeader(path + '.dat', header, arr) else:", "C where X is a Python dictionary that contains supported data types. The", "are Python dictionaries (with string keys) that contain summable data types. The operation", "is called recursively. The keys should be strings to guarantee successful operation. Parameters", "during recursive operation. This parameter was added for debugging purposes and does not", "(len(val.shape) > 1): np_all_1d = False break if (len(np_keys) > 0): if np_all_1d:", "n_col = len(np_keys) arr = np.zeros([n_row, n_col]) for idx, key in enumerate(np_keys): arr[:,", "'/' + key + '.json' util.md(file_name) with open(file_name, \"w\") as fp: json.dump(dic[key], fp,", "skip_keys: continue X[key] *= C def append_values(X, Y, skip_keys=['radii']): \"\"\"Implement X.append(Y) where X", "file AUTHORS.rst for the full list of contributors. # # Released under the", "MIT License, see the file LICENSE.txt. \"\"\"Various NumPy- and dictionary-related utilities. Implements add,", "practical relevance. Returns ------- None The function write_dict does not return anything. \"\"\"", "the values of X, Y are NumPy arrays (e.g. particle numbers) that are", "text files rad = 'radii' if rad in np_keys: np_keys.remove(rad) np_keys.insert(0, rad) #", "dictionaries that contain NumPy data types. The operation is applied to X for", "dic[key] # --- dump data util.savetxtHeader(path + '/' + key + '.dat', '#", "summable data types. The operation is applied to X for any value in", "files rad = 'radii' if rad in np_keys: np_keys.remove(rad) np_keys.insert(0, rad) # ---", "dictionary A dictionary containing NumPy arrays or other Python data structures. path :", "else: if isinstance(val, np.ndarray): np_keys.append(key) else: py_keys.append(key) # --- np_keys.sort() py_keys.sort() # ---", "of X, Y are NumPy arrays (e.g. histograms) that are summed. Parameters ----------", "= copy.deepcopy(Y[key]) else: X[key] = np.append(X[key], Y[key]) def write_dict(dic, path, level=0): \"\"\"Write a", "and Y are Python dictionaries that contain NumPy data types. The operation is", "\"\"\" assert isinstance(X, dict) assert isinstance(Y, dict) for key in list(Y.keys()): if key", "np_keys.remove(rad) header = '#' for key in np_keys: header = header + '", "assert isinstance(X, dict) assert isinstance(Y, dict) for key in list(Y.keys()): if key in", "file_name = path + '/' + key + '.json' util.md(file_name) with open(file_name, \"w\")", "array of size compatible with the contents of X. 
skip_keys : list of", "level + 1 write_dict(val, _path, _level) else: if isinstance(val, np.ndarray): np_keys.append(key) else: py_keys.append(key)", "does not return anything. \"\"\" assert isinstance(X, dict) for key in list(X.keys()): if", "contain NumPy data types. The operation is applied to X for any value", "[] py_keys = [] for key in list(dic.keys()): val = dic[key] if isinstance(val,", "= X times C where X is a Python dictionary that contains supported", "and dictionary-related utilities. Implements add, append, and scale operations for numerical data (ie.", "(1) save NumPy arrays to text files rad = 'radii' if rad in", "arr) # --- (2) for robustness, save any other Python data to JSON", "structures. path : string Path where the dictionary and its data shall be", "arrays to text files rad = 'radii' if rad in np_keys: np_keys.remove(rad) np_keys.insert(0,", "# --- dump data util.savetxtHeader(path + '.dat', header, arr) else: # --- we", "import json from . import util def sum_values(X, Y, skip_keys=['radii', 'frame']): \"\"\"Implement X", "'# ' + key, arr) # --- (2) for robustness, save any other", "list(dic.keys()): val = dic[key] if isinstance(val, dict): _path = path + '/' +", "write_dict does not return anything. \"\"\" np_keys = [] py_keys = [] for", "# Released under the MIT License, see the file LICENSE.txt. \"\"\"Various NumPy- and", "key in list(dic.keys()): val = dic[key] if isinstance(val, dict): _path = path +", "not return anything. \"\"\" assert isinstance(X, dict) for key in list(X.keys()): if key", "------- None The function write_dict does not return anything. \"\"\" np_keys = []", "= copy.deepcopy(Y[key]) else: X[key] += Y[key] def scale_values(X, C, skip_keys=['radii', 'frame']): \"\"\"Implement X", "keys that are in the list skip_keys. Typically, the values of X, Y", "list skip_keys. Typically, the values of X, Y are NumPy arrays (e.g. histograms)", "# --- (1) save NumPy arrays to text files rad = 'radii' if", "nested-dictionary hierarchy during recursive operation. This parameter was added for debugging purposes and", "for key in list(dic.keys()): val = dic[key] if isinstance(val, dict): _path = path", "arrays or other Python data structures to text files. In case the dictionary", "should be strings to guarantee successful operation. Parameters ---------- dic : dictionary A", "in the list skip_keys. Typically, the values of X are NumPy arrays (histograms)", "(histograms) that are rescaled after summation using a scalar C (e.g. to implement", "called recursively. The keys should be strings to guarantee successful operation. Parameters ----------", "--- build header if rad in np_keys: np_keys.remove(rad) header = '#' for key", "arrays. C : scalar, NumPy array C is a multiplier, either a scalar", "X += Y where X and Y are Python dictionaries (with string keys)", "does not return anything. \"\"\" np_keys = [] py_keys = [] for key", "ASCII output routine is provided. \"\"\" import copy import numpy as np import", "scale_values(X, C, skip_keys=['radii', 'frame']): \"\"\"Implement X = X times C where X is", "np_keys: header = header + ' ' + key # --- dump data", "Y[key]) def write_dict(dic, path, level=0): \"\"\"Write a dictionary containing NumPy arrays or other", "NumPy arrays (e.g. histograms) that are summed. Parameters ---------- X : dict X", "the nested-dictionary hierarchy during recursive operation. 
This parameter was added for debugging purposes", "n_row = val.shape[0] n_col = len(np_keys) arr = np.zeros([n_row, n_col]) for idx, key", "output routine is provided. \"\"\" import copy import numpy as np import json", "skip_keys: continue if key not in X: X[key] = copy.deepcopy(Y[key]) else: X[key] +=", "= dic[key] if (len(val.shape) > 1): np_all_1d = False break if (len(np_keys) >", "shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # Cadishi --- CAlculation of DIStance HIstograms # #", "Y, skip_keys=['radii', 'frame']): \"\"\"Implement X += Y where X and Y are Python", "types. The operation is applied to any value in X, excluding keys that", "the file LICENSE.txt. \"\"\"Various NumPy- and dictionary-related utilities. Implements add, append, and scale", "save any other Python data to JSON text files if (len(py_keys) > 0):", "keys that are in the list skip_keys. Typically, the values of X are", "which the append operation is skipped. Returns ------- None The function scale_values operates", "that are in the list skip_keys. Typically, the values of X are NumPy", "stored in dictionaries. In addition, an ASCII output routine is provided. \"\"\" import", "the append operation is skipped. Returns ------- None The function scale_values operates on", "anything. \"\"\" assert isinstance(X, dict) assert isinstance(Y, dict) for key in list(Y.keys()): if", "numpy as np import json from . import util def sum_values(X, Y, skip_keys=['radii',", "X directly and does not return anything. \"\"\" assert isinstance(X, dict) assert isinstance(Y,", "if isinstance(val, dict): _path = path + '/' + key _level = level", "json from . import util def sum_values(X, Y, skip_keys=['radii', 'frame']): \"\"\"Implement X +=", "for the full list of contributors. # # Released under the MIT License,", "be written to. level : int, optional Level in the nested-dictionary hierarchy during", "dictionary containing NumPy arrays or other Python data structures. path : string Path", "recursive operation. This parameter was added for debugging purposes and does not have", "parameter was added for debugging purposes and does not have any practical relevance.", "was added for debugging purposes and does not have any practical relevance. Returns", "values of X, Y are NumPy arrays (e.g. particle numbers) that are appended.", "dictionary that contains supported data types. The operation is applied to any value", "val = dic[np_keys[0]] n_row = val.shape[0] n_col = len(np_keys) arr = np.zeros([n_row, n_col])", "keys that contains NumPy arrays. skip_keys : list of strings skip_keys is a", "arr) else: # --- we save arrays with more than one dimension separately", "(e.g. particle numbers) that are appended. Parameters ---------- X : dict X is", "X is a dictionary with string keys that contains NumPy arrays. Y :", "in list(X.keys()): if key in skip_keys: continue X[key] *= C def append_values(X, Y,", "arrays with more than one dimension separately for key in np_keys: arr =", "a scalar of a NumPy array of size compatible with the contents of", "concatenate arrays into a 2d array val = dic[np_keys[0]] n_row = val.shape[0] n_col", "dictionary containing NumPy arrays or other Python data structures to text files. In", "path + '/' + key _level = level + 1 write_dict(val, _path, _level)", "np.append(X[key], Y[key]) def write_dict(dic, path, level=0): \"\"\"Write a dictionary containing NumPy arrays or", "that contains NumPy arrays. 
C : scalar, NumPy array C is a multiplier,", "Y[key] def scale_values(X, C, skip_keys=['radii', 'frame']): \"\"\"Implement X = X times C where", "string keys that contains NumPy arrays. C : scalar, NumPy array C is", "+ key + '.json' util.md(file_name) with open(file_name, \"w\") as fp: json.dump(dic[key], fp, indent=4,", "scalar of a NumPy array of size compatible with the contents of X.", "if key in skip_keys: continue X[key] *= C def append_values(X, Y, skip_keys=['radii']): \"\"\"Implement", "either a scalar of a NumPy array of size compatible with the contents", "files. In case the dictionary contains other dictionaries, the function is called recursively.", "in np_keys: val = dic[key] if (len(val.shape) > 1): np_all_1d = False break", "added for debugging purposes and does not have any practical relevance. Returns -------", "contain summable data types. The operation is applied to X for any value", ". import util def sum_values(X, Y, skip_keys=['radii', 'frame']): \"\"\"Implement X += Y where", "= np.append(X[key], Y[key]) def write_dict(dic, path, level=0): \"\"\"Write a dictionary containing NumPy arrays", "False break if (len(np_keys) > 0): if np_all_1d: # --- concatenate arrays into", "for key in np_keys: header = header + ' ' + key #", "# --- build header if rad in np_keys: np_keys.remove(rad) header = '#' for", "the sum operation is skipped. Returns ------- None The function scale_values operates on", "\"\"\" assert isinstance(X, dict) for key in list(X.keys()): if key in skip_keys: continue", "idx, key in enumerate(np_keys): arr[:, idx] = (dic[key])[:] # --- build header if", "if key in skip_keys: continue if key not in X: X[key] = copy.deepcopy(Y[key])", "key not in X: X[key] = copy.deepcopy(Y[key]) else: X[key] += Y[key] def scale_values(X,", "are NumPy arrays (e.g. histograms) that are summed. Parameters ---------- X : dict", "' ' + key # --- dump data util.savetxtHeader(path + '.dat', header, arr)", "Typically, the values of X are NumPy arrays (histograms) that are rescaled after", "DIStance HIstograms # # Copyright (c) <NAME>, <NAME> # See the file AUTHORS.rst", "Python dictionaries that contain NumPy data types. The operation is applied to X", "(2) for robustness, save any other Python data to JSON text files if", "list skip_keys. Typically, the values of X, Y are NumPy arrays (e.g. particle", "are in the list skip_keys. Typically, the values of X are NumPy arrays", "the dictionary contains other dictionaries, the function is called recursively. The keys should", "list of strings for which the sum operation is skipped. Returns ------- None", "return anything. \"\"\" assert isinstance(X, dict) assert isinstance(Y, dict) for key in list(Y.keys()):", "on X directly and does not return anything. \"\"\" assert isinstance(X, dict) for", "np_keys: val = dic[key] if (len(val.shape) > 1): np_all_1d = False break if", "key + '.json' util.md(file_name) with open(file_name, \"w\") as fp: json.dump(dic[key], fp, indent=4, sort_keys=True)", "arr[:, idx] = (dic[key])[:] # --- build header if rad in np_keys: np_keys.remove(rad)", "of strings skip_keys is a list of strings for which the sum operation", "structures to text files. 
In case the dictionary contains other dictionaries, the function", "X[key] = copy.deepcopy(Y[key]) else: X[key] += Y[key] def scale_values(X, C, skip_keys=['radii', 'frame']): \"\"\"Implement", "Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4", "The operation is applied to any value in X, excluding keys that are", "for key in np_keys: val = dic[key] if (len(val.shape) > 1): np_all_1d =", "data to JSON text files if (len(py_keys) > 0): for key in py_keys:", "optional Level in the nested-dictionary hierarchy during recursive operation. This parameter was added", "where X is a Python dictionary that contains supported data types. The operation", "+ '/' + key + '.dat', '# ' + key, arr) # ---", "of size compatible with the contents of X. skip_keys : list of strings", "arrays or other Python data structures. path : string Path where the dictionary", "written to. level : int, optional Level in the nested-dictionary hierarchy during recursive", "that are appended. Parameters ---------- X : dict X is a dictionary with", "indent-tabs-mode:nil; coding: utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # #", "> 1): np_all_1d = False break if (len(np_keys) > 0): if np_all_1d: #", "in X, excluding keys that are in the list skip_keys. Typically, the values", "debugging purposes and does not have any practical relevance. Returns ------- None The", "key + '.dat', '# ' + key, arr) # --- (2) for robustness,", "are NumPy arrays (histograms) that are rescaled after summation using a scalar C", "separately for key in np_keys: arr = dic[key] # --- dump data util.savetxtHeader(path", "if (len(np_keys) > 0): if np_all_1d: # --- concatenate arrays into a 2d", "string Path where the dictionary and its data shall be written to. level", "directly and does not return anything. \"\"\" assert isinstance(X, dict) assert isinstance(Y, dict)", "averaging operation). Parameters ---------- X : dict X is a dictionary with string", "------- None The function sum_values operates on X directly and does not return", "keys) that contain summable data types. The operation is applied to X for", "and does not return anything. \"\"\" assert isinstance(X, dict) assert isinstance(Y, dict) for", "NumPy array C is a multiplier, either a scalar of a NumPy array", "NumPy arrays or other Python data structures to text files. In case the", "not return anything. \"\"\" assert isinstance(X, dict) assert isinstance(Y, dict) for key in", "keys that contains NumPy arrays. C : scalar, NumPy array C is a", "_path = path + '/' + key _level = level + 1 write_dict(val,", "dictionaries (with string keys) that contain summable data types. The operation is applied", "import util def sum_values(X, Y, skip_keys=['radii', 'frame']): \"\"\"Implement X += Y where X", "to text files. In case the dictionary contains other dictionaries, the function is", "level=0): \"\"\"Write a dictionary containing NumPy arrays or other Python data structures to", "types. The operation is applied to X for any value in Y, excluding", "copy.deepcopy(Y[key]) else: X[key] = np.append(X[key], Y[key]) def write_dict(dic, path, level=0): \"\"\"Write a dictionary", "dic[key] if (len(val.shape) > 1): np_all_1d = False break if (len(np_keys) > 0):", "dictionary with string keys that contains NumPy arrays. C : scalar, NumPy array", "string keys that contains NumPy arrays. Y : dict Y is a dictionary", "to guarantee successful operation. 
Parameters ---------- dic : dictionary A dictionary containing NumPy", "in skip_keys: continue if key not in X: X[key] = copy.deepcopy(Y[key]) else: X[key]", "Returns ------- None The function write_dict does not return anything. \"\"\" np_keys =", "contains other dictionaries, the function is called recursively. The keys should be strings", "anything. \"\"\" np_keys = [] py_keys = [] for key in list(dic.keys()): val", "(c) <NAME>, <NAME> # See the file AUTHORS.rst for the full list of", "does not return anything. \"\"\" assert isinstance(X, dict) assert isinstance(Y, dict) for key", "the contents of X. skip_keys : list of strings skip_keys is a list", "np_all_1d = True for key in np_keys: val = dic[key] if (len(val.shape) >", "header = header + ' ' + key # --- dump data util.savetxtHeader(path", "\"\"\" np_keys = [] py_keys = [] for key in list(dic.keys()): val =", "for key in np_keys: arr = dic[key] # --- dump data util.savetxtHeader(path +", "for robustness, save any other Python data to JSON text files if (len(py_keys)", "--- dump data util.savetxtHeader(path + '.dat', header, arr) else: # --- we save", "function is called recursively. The keys should be strings to guarantee successful operation.", "successful operation. Parameters ---------- dic : dictionary A dictionary containing NumPy arrays or", "= dic[np_keys[0]] n_row = val.shape[0] n_col = len(np_keys) arr = np.zeros([n_row, n_col]) for", "in py_keys: file_name = path + '/' + key + '.json' util.md(file_name) with", "(e.g. to implement averaging operation). Parameters ---------- X : dict X is a", "of contributors. # # Released under the MIT License, see the file LICENSE.txt.", "NumPy arrays or other Python data structures. path : string Path where the", "Python dictionaries (with string keys) that contain summable data types. The operation is", "operation is skipped. Returns ------- None The function scale_values operates on X directly", "contains supported data types. The operation is applied to any value in X,", "and does not have any practical relevance. Returns ------- None The function write_dict", "for which the sum operation is skipped. Returns ------- None The function sum_values", "NumPy arrays) stored in dictionaries. In addition, an ASCII output routine is provided.", "with string keys that contains NumPy arrays. skip_keys : list of strings skip_keys", "skip_keys=['radii', 'frame']): \"\"\"Implement X += Y where X and Y are Python dictionaries", "= dic[key] if isinstance(val, dict): _path = path + '/' + key _level", "Returns ------- None The function sum_values operates on X directly and does not", "in np_keys: header = header + ' ' + key # --- dump", "python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4", "key not in X: X[key] = copy.deepcopy(Y[key]) else: X[key] = np.append(X[key], Y[key]) def", "string keys) that contain summable data types. The operation is applied to X", "skip_keys : list of strings skip_keys is a list of strings for which", "val.shape[0] n_col = len(np_keys) arr = np.zeros([n_row, n_col]) for idx, key in enumerate(np_keys):", "dict X is a dictionary with string keys that contains NumPy arrays. C", "key in enumerate(np_keys): arr[:, idx] = (dic[key])[:] # --- build header if rad", "build header if rad in np_keys: np_keys.remove(rad) header = '#' for key in", "histograms) that are summed. 
Parameters ---------- X : dict X is a dictionary", "fileencoding=utf-8 # # Cadishi --- CAlculation of DIStance HIstograms # # Copyright (c)", "value in Y, excluding keys that are in the list skip_keys. Typically, the", "np import json from . import util def sum_values(X, Y, skip_keys=['radii', 'frame']): \"\"\"Implement", "not in X: X[key] = copy.deepcopy(Y[key]) else: X[key] += Y[key] def scale_values(X, C,", "dictionary contains other dictionaries, the function is called recursively. The keys should be", "+ 1 write_dict(val, _path, _level) else: if isinstance(val, np.ndarray): np_keys.append(key) else: py_keys.append(key) #", "softtabstop=4 fileencoding=utf-8 # # Cadishi --- CAlculation of DIStance HIstograms # # Copyright", "more than one dimension separately for key in np_keys: arr = dic[key] #", "addition, an ASCII output routine is provided. \"\"\" import copy import numpy as", "key in np_keys: arr = dic[key] # --- dump data util.savetxtHeader(path + '/'", "function sum_values operates on X directly and does not return anything. \"\"\" assert", "array val = dic[np_keys[0]] n_row = val.shape[0] n_col = len(np_keys) arr = np.zeros([n_row,", "other Python data structures. path : string Path where the dictionary and its", "a dictionary with string keys that contains NumPy arrays. C : scalar, NumPy", "X[key] = np.append(X[key], Y[key]) def write_dict(dic, path, level=0): \"\"\"Write a dictionary containing NumPy", "that contains NumPy arrays. skip_keys : list of strings skip_keys is a list", "if (len(py_keys) > 0): for key in py_keys: file_name = path + '/'", "the function is called recursively. The keys should be strings to guarantee successful", "under the MIT License, see the file LICENSE.txt. \"\"\"Various NumPy- and dictionary-related utilities.", "# --- np_all_1d = True for key in np_keys: val = dic[key] if", "Level in the nested-dictionary hierarchy during recursive operation. This parameter was added for", "assert isinstance(X, dict) for key in list(X.keys()): if key in skip_keys: continue X[key]", "for numerical data (ie. NumPy arrays) stored in dictionaries. In addition, an ASCII", "(e.g. histograms) that are summed. Parameters ---------- X : dict X is a", "a dictionary containing NumPy arrays or other Python data structures to text files.", "the sum operation is skipped. Returns ------- None The function sum_values operates on", "key, arr) # --- (2) for robustness, save any other Python data to", "copy import numpy as np import json from . import util def sum_values(X,", "dump data util.savetxtHeader(path + '.dat', header, arr) else: # --- we save arrays", "Implements add, append, and scale operations for numerical data (ie. NumPy arrays) stored", "for which the sum operation is skipped. Returns ------- None The function scale_values", "# --- (2) for robustness, save any other Python data to JSON text", "def append_values(X, Y, skip_keys=['radii']): \"\"\"Implement X.append(Y) where X and Y are Python dictionaries", "Released under the MIT License, see the file LICENSE.txt. \"\"\"Various NumPy- and dictionary-related", "using a scalar C (e.g. to implement averaging operation). Parameters ---------- X :", "X.append(Y) where X and Y are Python dictionaries that contain NumPy data types.", "In addition, an ASCII output routine is provided. \"\"\" import copy import numpy", "which the sum operation is skipped. 
Returns ------- None The function sum_values operates", "Y, skip_keys=['radii']): \"\"\"Implement X.append(Y) where X and Y are Python dictionaries that contain", "key in np_keys: val = dic[key] if (len(val.shape) > 1): np_all_1d = False", "> 0): for key in py_keys: file_name = path + '/' + key", "= dic[key] # --- dump data util.savetxtHeader(path + '/' + key + '.dat',", "py_keys: file_name = path + '/' + key + '.json' util.md(file_name) with open(file_name,", "where the dictionary and its data shall be written to. level : int,", "arr = dic[key] # --- dump data util.savetxtHeader(path + '/' + key +", "'#' for key in np_keys: header = header + ' ' + key", "any value in X, excluding keys that are in the list skip_keys. Typically,", "of strings for which the append operation is skipped. Returns ------- None The", "n_col]) for idx, key in enumerate(np_keys): arr[:, idx] = (dic[key])[:] # --- build", "contains NumPy arrays. C : scalar, NumPy array C is a multiplier, either", "save arrays with more than one dimension separately for key in np_keys: arr", ": dictionary A dictionary containing NumPy arrays or other Python data structures. path", "continue if key not in X: X[key] = copy.deepcopy(Y[key]) else: X[key] += Y[key]", "LICENSE.txt. \"\"\"Various NumPy- and dictionary-related utilities. Implements add, append, and scale operations for", "= 'radii' if rad in np_keys: np_keys.remove(rad) np_keys.insert(0, rad) # --- np_all_1d =", "--- we save arrays with more than one dimension separately for key in", "(with string keys) that contain summable data types. The operation is applied to", "2d array val = dic[np_keys[0]] n_row = val.shape[0] n_col = len(np_keys) arr =", "C : scalar, NumPy array C is a multiplier, either a scalar of", "scale_values operates on X directly and does not return anything. \"\"\" assert isinstance(X,", "summed. Parameters ---------- X : dict X is a dictionary with string keys", ": list of strings skip_keys is a list of strings for which the", "dic[key] if isinstance(val, dict): _path = path + '/' + key _level =", "are appended. Parameters ---------- X : dict X is a dictionary with string", "= np.zeros([n_row, n_col]) for idx, key in enumerate(np_keys): arr[:, idx] = (dic[key])[:] #", "In case the dictionary contains other dictionaries, the function is called recursively. The", "AUTHORS.rst for the full list of contributors. # # Released under the MIT", "None The function write_dict does not return anything. \"\"\" np_keys = [] py_keys", "a NumPy array of size compatible with the contents of X. skip_keys :", "X[key] *= C def append_values(X, Y, skip_keys=['radii']): \"\"\"Implement X.append(Y) where X and Y", "keys should be strings to guarantee successful operation. Parameters ---------- dic : dictionary", "# --- concatenate arrays into a 2d array val = dic[np_keys[0]] n_row =", "of X are NumPy arrays (histograms) that are rescaled after summation using a", "are NumPy arrays (e.g. particle numbers) that are appended. Parameters ---------- X :", "None The function sum_values operates on X directly and does not return anything.", "the file AUTHORS.rst for the full list of contributors. # # Released under", "are rescaled after summation using a scalar C (e.g. to implement averaging operation).", "This parameter was added for debugging purposes and does not have any practical", "data types. 
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# Cadishi --- CAlculation of DIStance HIstograms
#
# Copyright (c) <NAME>, <NAME>
# See the file AUTHORS.rst for the full list of contributors.
#
# Released under the MIT License, see the file LICENSE.txt.

"""Various NumPy- and dictionary-related utilities.

Implements add, append, and scale operations for numerical data (i.e. NumPy
arrays) stored in dictionaries.  In addition, an ASCII output routine is
provided.
"""

import copy
import json
import numpy as np
from . import util


def sum_values(X, Y, skip_keys=['radii', 'frame']):
    """Implement X += Y where X and Y are Python dictionaries (with string
    keys) that contain summable data types.

    The operation is applied to X for any value in Y, excluding keys that are
    in the list skip_keys.  Typically, the values of X, Y are NumPy arrays
    (e.g. histograms) that are summed.

    Parameters
    ----------
    X : dict
        X is a dictionary with string keys that contains NumPy arrays.
    Y : dict
        Y is a dictionary with string keys that contains NumPy arrays.
    skip_keys : list of strings
        skip_keys is a list of strings for which the sum operation is skipped.

    Returns
    -------
    None
        The function sum_values operates on X directly and does not return
        anything.
    """
    assert isinstance(X, dict)
    assert isinstance(Y, dict)
    for key in list(Y.keys()):
        if key in skip_keys:
            continue
        if key not in X:
            X[key] = copy.deepcopy(Y[key])
        else:
            X[key] += Y[key]


def scale_values(X, C, skip_keys=['radii', 'frame']):
    """Implement X = X times C where X is a Python dictionary that contains
    supported data types.

    The operation is applied to any value in X, excluding keys that are in
    the list skip_keys.  Typically, the values of X are NumPy arrays
    (histograms) that are rescaled after summation using a scalar C (e.g. to
    implement an averaging operation).

    Parameters
    ----------
    X : dict
        X is a dictionary with string keys that contains NumPy arrays.
    C : scalar or NumPy array
        C is a multiplier, either a scalar or a NumPy array of a size
        compatible with the arrays of X.
    skip_keys : list of strings
        skip_keys is a list of strings for which the scale operation is
        skipped.

    Returns
    -------
    None
        The function scale_values operates on X directly and does not return
        anything.
    """
    assert isinstance(X, dict)
    for key in list(X.keys()):
        if key in skip_keys:
            continue
        X[key] *= C


def append_values(X, Y, skip_keys=['radii']):
    """Implement X.append(Y) where X and Y are Python dictionaries that
    contain NumPy data types.

    The operation is applied to X for any value in Y, excluding keys that are
    in the list skip_keys.  Typically, the values of X, Y are NumPy arrays
    (e.g. particle numbers) that are appended.

    Parameters
    ----------
    X : dict
        X is a dictionary with string keys that contains NumPy arrays.
    Y : dict
        Y is a dictionary with string keys that contains NumPy arrays.
    skip_keys : list of strings
        skip_keys is a list of strings for which the append operation is
        skipped.

    Returns
    -------
    None
        The function append_values operates on X directly and does not return
        anything.
    """
    assert isinstance(X, dict)
    assert isinstance(Y, dict)
    for key in list(Y.keys()):
        if key in skip_keys:
            continue
        if key not in X:
            X[key] = copy.deepcopy(Y[key])
        else:
            X[key] = np.append(X[key], Y[key])


def write_dict(dic, path, level=0):
    """Write a dictionary containing NumPy arrays or other Python data
    structures to text files.

    In case the dictionary contains other dictionaries, the function is
    called recursively.  The keys should be strings to guarantee successful
    operation.

    Parameters
    ----------
    dic : dictionary
        A dictionary containing NumPy arrays or other Python data structures.
    path : string
        Path the dictionary and its data shall be written to.
    level : int, optional
        Level in the nested-dictionary hierarchy during recursive operation.
        This parameter was added for debugging purposes and does not have any
        practical relevance.

    Returns
    -------
    None
        The function write_dict does not return anything.
    """
    np_keys = []
    py_keys = []
    for key in list(dic.keys()):
        val = dic[key]
        if isinstance(val, dict):
            # --- recurse into nested dictionaries
            _path = path + '/' + key
            _level = level + 1
            write_dict(val, _path, _level)
        else:
            if isinstance(val, np.ndarray):
                np_keys.append(key)
            else:
                py_keys.append(key)
    # ---
    np_keys.sort()
    py_keys.sort()
    # --- (1) save NumPy arrays to text files
    rad = 'radii'
    if rad in np_keys:
        np_keys.remove(rad)
        np_keys.insert(0, rad)
    # --- check whether all arrays are one-dimensional
    np_all_1d = True
    for key in np_keys:
        val = dic[key]
        if (len(val.shape) > 1):
            np_all_1d = False
    if (len(np_keys) > 0):
        if np_all_1d:
            # --- concatenate arrays into a 2d array
            val = dic[np_keys[0]]
            n_row = val.shape[0]
            n_col = len(np_keys)
            arr = np.zeros([n_row, n_col])
            for idx, key in enumerate(np_keys):
                arr[:, idx] = (dic[key])[:]
            # --- build header
            if rad in np_keys:
                np_keys.remove(rad)
            header = '#'
            for key in np_keys:
                header = header + ' ' + key
            # --- dump data
            util.savetxtHeader(path + '.dat', header, arr)
        else:
            # --- we save arrays with more than one dimension separately
            for key in np_keys:
                arr = dic[key]
                # --- dump data
                util.savetxtHeader(path + '/' + key + '.dat', '# ' + key, arr)
    # --- (2) for robustness, save any other Python data to JSON text files
    if (len(py_keys) > 0):
        for key in py_keys:
            file_name = path + '/' + key + '.json'
            with open(file_name, 'w') as fp:
                json.dump(dic[key], fp)
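# --- A minimal usage sketch, not part of the original module: it shows how
# sum_values() and scale_values() combine to average per-frame histograms.
# The keys 'histo' and 'radii' are hypothetical example names; 'radii' stays
# untouched because it is in the default skip_keys list.  The guard below
# only runs when the module is executed within its package (e.g. via
# python -m), since the module uses a relative import of util.
if __name__ == '__main__':
    total = {}
    n_frames = 3
    for _ in range(n_frames):
        frame = {'radii': np.linspace(0.1, 1.0, 4),  # skipped via skip_keys
                 'histo': np.ones(4)}                # summed across frames
        sum_values(total, frame)
    scale_values(total, 1.0 / n_frames)              # rescale the sum to the mean
    print(total['histo'])                            # -> [1. 1. 1. 1.]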
[ "i in range(0,24): if(i<10): i = '0'+str(i) else: i = str(i) print(day+i+' =", "<filename>grocercheck/scripts/lazy_coding_scripts/generateCodeForModel.py days = [\"mon\",'tue','wed','thu','fri','sat','sun'] for day in days: for i in range(0,24): if(i<10):", "= [\"mon\",'tue','wed','thu','fri','sat','sun'] for day in days: for i in range(0,24): if(i<10): i =", "in range(0,24): if(i<10): i = '0'+str(i) else: i = str(i) print(day+i+' = models.IntegerField(null=True)')", "for day in days: for i in range(0,24): if(i<10): i = '0'+str(i) else:", "in days: for i in range(0,24): if(i<10): i = '0'+str(i) else: i =", "for i in range(0,24): if(i<10): i = '0'+str(i) else: i = str(i) print(day+i+'", "[\"mon\",'tue','wed','thu','fri','sat','sun'] for day in days: for i in range(0,24): if(i<10): i = '0'+str(i)", "days = [\"mon\",'tue','wed','thu','fri','sat','sun'] for day in days: for i in range(0,24): if(i<10): i", "days: for i in range(0,24): if(i<10): i = '0'+str(i) else: i = str(i)", "day in days: for i in range(0,24): if(i<10): i = '0'+str(i) else: i" ]